diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..30a5d006a78a6de1214906df70eea4dce234dc1a
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,20 @@
+.PHONY: quality style
+
+check_dirs := promptsource
+
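+# Example usage (assumes black, isort, and flake8 are installed locally):
+#   make quality   # check formatting, import order, and lint
+#   make style     # auto-format in place
+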
+# Check that source code meets quality standards
+
+quality:
+	black --check --line-length 119 --target-version py38 $(check_dirs)
+	isort --check-only $(check_dirs)
+	flake8 $(check_dirs) --max-line-length 119
+
+# Format source code automatically
+
+style:
+	black --line-length 119 --target-version py38 $(check_dirs)
+	isort $(check_dirs)
diff --git a/promptsource/__init__.py b/promptsource/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/promptsource/app.py b/promptsource/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..64460be4e8841aaa9f7cfa904d54a1159ceb7f04
--- /dev/null
+++ b/promptsource/app.py
@@ -0,0 +1,594 @@
+import argparse
+import textwrap
+from multiprocessing import Manager, Pool
+
+import pandas as pd
+import plotly.express as px
+import streamlit as st
+from datasets import get_dataset_infos
+from pygments import highlight
+from pygments.formatters import HtmlFormatter
+from pygments.lexers import DjangoLexer
+
+from promptsource.session import _get_state
+from promptsource.templates import Template, TemplateCollection
+from promptsource.utils import (
+    get_dataset,
+    get_dataset_confs,
+    list_datasets,
+    removeHyphen,
+    renameDatasetColumn,
+    render_features,
+)
+
+
+# Add an argument for read-only mode.
+# At the moment, Streamlit does not handle Python script arguments gracefully,
+# so to launch in read-only mode you must use one of the following commands:
+# streamlit run promptsource/app.py -- -r
+# streamlit run promptsource/app.py -- --read-only
+# See https://github.com/streamlit/streamlit/issues/337 for more information.
+parser = argparse.ArgumentParser(description="run app.py with args")
+parser.add_argument("-r", "--read-only", action="store_true", help="whether to run it as read-only mode")
+
+args = parser.parse_args()
+if args.read_only:
+    select_options = ["Helicopter view", "Prompted dataset viewer"]
+    side_bar_title_prefix = "Promptsource (Read only)"
+else:
+    select_options = ["Helicopter view", "Prompted dataset viewer", "Sourcing"]
+    side_bar_title_prefix = "Promptsource"
+
+#
+# Helper functions for datasets library
+#
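+# Streamlit re-executes this script on every interaction, so the loaders are
+# wrapped in st.cache to memoize dataset downloads and configs across reruns.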
+get_dataset = st.cache(allow_output_mutation=True)(get_dataset)
+get_dataset_confs = st.cache(get_dataset_confs)
+
+
+def reset_template_state():
+    state.template_name = None
+    state.jinja = None
+    state.reference = None
+
+
+#
+# Loads session state
+#
+state = _get_state()
+
+#
+# Initial page setup
+#
+st.set_page_config(page_title="Promptsource", layout="wide")
+st.sidebar.markdown(
+    "<center><a href='https://github.com/bigscience-workshop/promptsource'>💻Github - Promptsource\n\n</a></center>",
+    unsafe_allow_html=True,
+)
+mode = st.sidebar.selectbox(
+    label="Choose a mode",
+    options=select_options,
+    index=0,
+    key="mode_select",
+)
+st.sidebar.title(f"{side_bar_title_prefix} 🌸 - {mode}")
+
+#
+# Adds pygments styles to the page.
+#
+st.markdown(
+    "<style>" + HtmlFormatter(style="friendly").get_style_defs(".highlight") + "</style>", unsafe_allow_html=True
+)
+
+WIDTH = 80
+
+
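+# Render a Jinja template string as syntax-highlighted HTML; Pygments'
+# DjangoLexer also covers Jinja syntax.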
+def show_jinja(t, width=WIDTH):
+    wrap = textwrap.fill(t, width=width, replace_whitespace=False)
+    out = highlight(wrap, DjangoLexer(), HtmlFormatter())
+    st.write(out, unsafe_allow_html=True)
+
+
+def show_text(t, width=WIDTH, with_markdown=False):
+    wrap = [textwrap.fill(subt, width=width, replace_whitespace=False) for subt in t.split("\n")]
+    wrap = "\n".join(wrap)
+    if with_markdown:
+        st.write(wrap, unsafe_allow_html=True)
+    else:
+        st.text(wrap)
+
+
+#
+# Loads template data
+#
+try:
+    template_collection = TemplateCollection()
+except FileNotFoundError:
+    st.error(
+        "Unable to find the prompt folder!\n\n"
+        "We expect the folder to be in the working directory. "
+        "You might need to restart the app in the root directory of the repo."
+    )
+    st.stop()
+
+
+if mode == "Helicopter view":
+    st.title("High level metrics")
+    st.write(
+        "If you want to contribute, please refer to the instructions in "
+        + "[Contributing](https://github.com/bigscience-workshop/promptsource/blob/main/CONTRIBUTING.md)."
+    )
+
+    #
+    # Global metrics
+    #
+    counts = template_collection.get_templates_count()
+    nb_prompted_datasets = len(counts)
+    st.write(f"## Number of *prompted datasets*: `{nb_prompted_datasets}`")
+    nb_prompts = sum(counts.values())
+    st.write(f"## Number of *prompts*: `{nb_prompts}`")
+
+    #
+    # Metrics per dataset/subset
+    #
+    # Download dataset infos (multiprocessing download)
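+    # A Manager dict is shared with the worker processes so each worker can
+    # record the infos it fetched; one process is spawned per dataset.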
+    manager = Manager()
+    all_infos = manager.dict()
+    all_datasets = list(set([t[0] for t in template_collection.keys]))
+
+    def get_infos(d_name):
+        all_infos[d_name] = get_dataset_infos(d_name)
+
+    pool = Pool(processes=len(all_datasets))
+    pool.map(get_infos, all_datasets)
+    pool.close()
+    pool.join()
+
+    results = []
+    for (dataset_name, subset_name) in template_collection.keys:
+        # Collect split sizes (train, validation and test)
+        if dataset_name not in all_infos:
+            infos = get_dataset_infos(dataset_name)
+            all_infos[dataset_name] = infos
+        else:
+            infos = all_infos[dataset_name]
+        if infos:
+            if subset_name is None:
+                subset_infos = infos[list(infos.keys())[0]]
+            else:
+                subset_infos = infos[subset_name]
+
+            split_sizes = {k: v.num_examples for k, v in subset_infos.splits.items()}
+        else:
+            # Zaid/coqa_expanded and Zaid/quac_expanded don't have dataset_infos.json,
+            # so `infos` is an empty dict and `infos[list(infos.keys())[0]]` raises an error.
+            # For simplicity, leave `split_sizes` empty so the displayed split sizes default to 0.
+            split_sizes = {}
+
+        # Collect template counts, original task counts and names
+        dataset_templates = template_collection.get_dataset(dataset_name, subset_name)
+        results.append(
+            {
+                "Dataset name": dataset_name,
+                "Subset name": "∅" if subset_name is None else subset_name,
+                "Train size": split_sizes["train"] if "train" in split_sizes else 0,
+                "Validation size": split_sizes["validation"] if "validation" in split_sizes else 0,
+                "Test size": split_sizes["test"] if "test" in split_sizes else 0,
+                "Number of prompts": len(dataset_templates),
+                "Number of original task prompts": sum(
+                    [bool(t.metadata.original_task) for t in dataset_templates.templates.values()]
+                ),
+                "Prompt names": [t.name for t in dataset_templates.templates.values()],
+            }
+        )
+    results_df = pd.DataFrame(results)
+    results_df.sort_values(["Number of prompts"], inplace=True, ascending=False)
+    results_df.reset_index(drop=True, inplace=True)
+
+    nb_training_instances = results_df["Train size"].sum()
+    st.write(f"## Number of *training instances*: `{nb_training_instances}`")
+
+    plot_df = results_df[["Dataset name", "Subset name", "Train size", "Number of prompts"]].copy()
+    plot_df["Name"] = plot_df["Dataset name"] + " - " + plot_df["Subset name"]
+    plot_df.sort_values(["Train size"], inplace=True, ascending=False)
+    fig = px.bar(
+        plot_df,
+        x="Name",
+        y="Train size",
+        hover_data=["Dataset name", "Subset name", "Number of prompts"],
+        log_y=True,
+        title="Number of training instances per data(sub)set - y-axis is in logscale",
+    )
+    fig.update_xaxes(visible=False, showticklabels=False)
+    st.plotly_chart(fig, use_container_width=True)
+    st.write(
+        f"- Top 3 training subsets account for `{100*plot_df[:3]['Train size'].sum()/nb_training_instances:.2f}%` of the training instances."
+    )
+    biggest_training_subset = plot_df.iloc[0]
+    st.write(
+        f"- Biggest training subset is *{biggest_training_subset['Name']}* with `{biggest_training_subset['Train size']}` instances"
+    )
+    smallest_training_subset = plot_df[plot_df["Train size"] > 0].iloc[-1]
+    st.write(
+        f"- Smallest training subset is *{smallest_training_subset['Name']}* with `{smallest_training_subset['Train size']}` instances"
+    )
+
+    st.markdown("***")
+    st.write("Details per dataset")
+    st.table(results_df)
+
+else:
+    # Combining modes `Prompted dataset viewer` and `Sourcing` since the
+    # two interfaces share the same backbone
+    assert mode in ["Prompted dataset viewer", "Sourcing"], (
+        f"`mode` ({mode}) should be in `[Helicopter view, Prompted dataset viewer, Sourcing]`"
+    )
+
+    #
+    # Loads dataset information
+    #
+
+    dataset_list = list_datasets(
+        template_collection,
+        state,
+    )
+    ag_news_index = dataset_list.index("ag_news")
+
+    #
+    # Select a dataset - starts with ag_news
+    #
+    dataset_key = st.sidebar.selectbox(
+        "Dataset",
+        dataset_list,
+        key="dataset_select",
+        index=ag_news_index,
+        help="Select the dataset to work on.",
+    )
+
+    #
+    # If a particular dataset is selected, loads dataset and template information
+    #
+    if dataset_key is not None:
+
+        #
+        # Check for subconfigurations (i.e. subsets)
+        #
+        configs = get_dataset_confs(dataset_key)
+        conf_option = None
+        if len(configs) > 0:
+            conf_option = st.sidebar.selectbox("Subset", configs, index=0, format_func=lambda a: a.name)
+
+        dataset = get_dataset(dataset_key, str(conf_option.name) if conf_option else None)
+        splits = list(dataset.keys())
+        index = 0
+        if "train" in splits:
+            index = splits.index("train")
+        split = st.sidebar.selectbox("Split", splits, key="split_select", index=index)
+        dataset = dataset[split]
+        dataset = renameDatasetColumn(dataset)
+
+        dataset_templates = template_collection.get_dataset(dataset_key, conf_option.name if conf_option else None)
+
+        template_list = dataset_templates.all_template_names
+        num_templates = len(template_list)
+        st.sidebar.write(
+            "No of prompts created for "
+            + f"`{dataset_key + (('/' + conf_option.name) if conf_option else '')}`"
+            + f": **{str(num_templates)}**"
+        )
+
+        if mode == "Prompted dataset viewer":
+            if num_templates > 0:
+                template_name = st.sidebar.selectbox(
+                    "Prompt name",
+                    template_list,
+                    key="template_select",
+                    index=0,
+                    help="Select the prompt to visualize.",
+                )
+
+            step = 50
+            example_index = st.sidebar.number_input(
+                f"Select the example index (Size = {len(dataset)})",
+                min_value=0,
+                max_value=max(0, len(dataset) - step),
+                value=0,
+                step=step,
+                key="example_index_number_input",
+                help="Offset = 50.",
+            )
+        else:  # mode = Sourcing
+            st.sidebar.subheader("Select Example")
+            example_index = st.sidebar.slider("Select the example index", 0, len(dataset) - 1)
+
+            example = dataset[example_index]
+            example = removeHyphen(example)
+
+            st.sidebar.write(example)
+
+        st.sidebar.subheader("Dataset Schema")
+        rendered_features = render_features(dataset.features)
+        st.sidebar.write(rendered_features)
+
+        #
+        # Display dataset information
+        #
+        st.header("Dataset: " + dataset_key + " " + (("/ " + conf_option.name) if conf_option else ""))
+
+        st.markdown(
+            "*Homepage*: "
+            + dataset.info.homepage
+            + "\n\n*Dataset*: https://github.com/huggingface/datasets/blob/master/datasets/%s/%s.py"
+            % (dataset_key, dataset_key)
+        )
+
+        md = """
+        %s
+        """ % (
+            dataset.info.description.replace("\\", "") if dataset_key else ""
+        )
+        st.markdown(md)
+
+        #
+        # Body of the app: display prompted examples in mode `Prompted dataset viewer`
+        # or text boxes to create new prompts in mode `Sourcing`
+        #
+        if mode == "Prompted dataset viewer":
+            #
+            # Display template information
+            #
+            if num_templates > 0:
+                template = dataset_templates[template_name]
+                st.subheader("Prompt")
+                st.markdown("##### Name")
+                st.text(template.name)
+                st.markdown("##### Reference")
+                st.text(template.reference)
+                st.markdown("##### Original Task? ")
+                st.text(template.metadata.original_task)
+                st.markdown("##### Choices in template? ")
+                st.text(template.metadata.choices_in_prompt)
+                st.markdown("##### Metrics")
+                st.text(", ".join(template.metadata.metrics) if template.metadata.metrics else None)
+                st.markdown("##### Answer Choices")
+                if template.get_answer_choices_expr() is not None:
+                    show_jinja(template.get_answer_choices_expr())
+                else:
+                    st.text(None)
+                st.markdown("##### Jinja template")
+                split_template = template.jinja.split("|||")
+                st.markdown("###### Input template")
+                show_jinja(split_template[0].strip())
+                if len(split_template) > 1:
+                    st.markdown("###### Target template")
+                    show_jinja(split_template[1].strip())
+                st.markdown("***")
+
+            #
+            # Display `step` examples starting at `example_index`
+            #
+            for ex_idx in range(example_index, example_index + step):
+                if ex_idx >= len(dataset):
+                    continue
+                example = dataset[ex_idx]
+                example = removeHyphen(example)
+                col1, _, col2 = st.beta_columns([12, 1, 12])
+                with col1:
+                    st.write(example)
+                if num_templates > 0:
+                    with col2:
+                        prompt = template.apply(example, highlight_variables=False)
+                        if prompt == [""]:
+                            st.write("∅∅∅ *Blank result*")
+                        else:
+                            st.write("Input")
+                            show_text(prompt[0])
+                            if len(prompt) > 1:
+                                st.write("Target")
+                                show_text(prompt[1])
+                st.markdown("***")
+        else:  # mode = Sourcing
+            st.markdown("## Prompt Creator")
+
+            #
+            # Create a new template or select an existing one
+            #
+            col1a, col1b, _, col2 = st.beta_columns([9, 9, 1, 6])
+
+            # current_templates_key and state.templates_key are keys for the templates object
+            current_templates_key = (dataset_key, conf_option.name if conf_option else None)
+
+            # Resets state if there has been a change in templates_key
+            if state.templates_key != current_templates_key:
+                state.templates_key = current_templates_key
+                reset_template_state()
+
+            with col1a, st.form("new_template_form"):
+                new_template_name = st.text_input(
+                    "Create a New Prompt",
+                    key="new_template",
+                    value="",
+                    help="Enter name and hit enter to create a new prompt.",
+                )
+                new_template_submitted = st.form_submit_button("Create")
+                if new_template_submitted:
+                    if new_template_name in dataset_templates.all_template_names:
+                        st.error(
+                            f"A prompt with the name {new_template_name} already exists "
+                            f"for dataset {state.templates_key}."
+                        )
+                    elif new_template_name == "":
+                        st.error("Need to provide a prompt name.")
+                    else:
+                        template = Template(new_template_name, "", "")
+                        dataset_templates.add_template(template)
+                        reset_template_state()
+                        state.template_name = new_template_name
+                else:
+                    state.new_template_name = None
+
+            with col1b, st.beta_expander("or Select Prompt", expanded=True):
+                dataset_templates = template_collection.get_dataset(*state.templates_key)
+                template_list = dataset_templates.all_template_names
+                if state.template_name:
+                    index = template_list.index(state.template_name)
+                else:
+                    index = 0
+                state.template_name = st.selectbox(
+                    "", template_list, key="template_select", index=index, help="Select the prompt to work on."
+                )
+
+                if st.button("Delete Prompt", key="delete_prompt"):
+                    dataset_templates.remove_template(state.template_name)
+                    reset_template_state()
+
+            variety_guideline = """
+            :heavy_exclamation_mark::question:Creating a diverse set of prompts whose differences go beyond surface wordings (i.e. marginally changing 2 or 3 words) is highly encouraged.
+            Ultimately, the hope is that exposing the model to such a diversity will have a non-trivial impact on the model's robustness to the prompt formulation.
+            \r**To get various prompts, you can try moving the cursor along theses axes**:
+            \n- **Interrogative vs affirmative form**: Ask a question about an attribute of the inputs or tell the model to decide something about the input.
+            \n- **Task description localization**: where is the task description blended with the inputs? In the beginning, in the middle, at the end?
+            \n- **Implicit situation or contextualization**: how explicit is the query? For instance, *Given this review, would you buy this product?* is an indirect way to ask whether the review is positive.
+            """
+
+            col1, _, _ = st.beta_columns([18, 1, 6])
+            with col1:
+                if state.template_name is not None:
+                    show_text(variety_guideline, with_markdown=True)
+
+            #
+            # Edit the created or selected template
+            #
+            col1, _, col2 = st.beta_columns([18, 1, 6])
+            with col1:
+                if state.template_name is not None:
+                    template = dataset_templates[state.template_name]
+                    #
+                    # If template is selected, displays template editor
+                    #
+                    with st.form("edit_template_form"):
+                        updated_template_name = st.text_input("Name", value=template.name)
+                        state.reference = st.text_input(
+                            "Prompt Reference",
+                            help="Short description of the prompt and/or paper reference for the prompt.",
+                            value=template.reference,
+                        )
+
+                        # Metadata
+                        state.metadata = template.metadata
+                        state.metadata.original_task = st.checkbox(
+                            "Original Task?",
+                            value=template.metadata.original_task,
+                            help="Prompt asks model to perform the original task designed for this dataset.",
+                        )
+                        state.metadata.choices_in_prompt = st.checkbox(
+                            "Choices in Template?",
+                            value=template.metadata.choices_in_prompt,
+                            help="Prompt explicitly lists choices in the template for the output.",
+                        )
+
+                        # Metrics from here:
+                        # https://github.com/google-research/text-to-text-transfer-transformer/blob/4b580f23968c2139be7fb1cd53b22c7a7f686cdf/t5/evaluation/metrics.py
+                        metrics_choices = [
+                            "BLEU",
+                            "ROUGE",
+                            "Squad",
+                            "Trivia QA",
+                            "Accuracy",
+                            "Pearson Correlation",
+                            "Spearman Correlation",
+                            "MultiRC",
+                            "AUC",
+                            "COQA F1",
+                            "Edit Distance",
+                        ]
+                        # Add mean reciprocal rank
+                        metrics_choices.append("Mean Reciprocal Rank")
+                        # Add generic other
+                        metrics_choices.append("Other")
+                        # Sort alphabetically
+                        metrics_choices = sorted(metrics_choices)
+                        state.metadata.metrics = st.multiselect(
+                            "Metrics",
+                            metrics_choices,
+                            default=template.metadata.metrics,
+                            help="Select all metrics that are commonly used (or should "
+                            "be used if a new task) to evaluate this prompt.",
+                        )
+
+                        # Answer choices
+                        if template.get_answer_choices_expr() is not None:
+                            answer_choices = template.get_answer_choices_expr()
+                        else:
+                            answer_choices = ""
+                        state.answer_choices = st.text_input(
+                            "Answer Choices",
+                            value=answer_choices,
+                            help="A Jinja expression for computing answer choices. "
+                            "Separate choices with a triple bar (|||).",
+                        )
+
+                        # Jinja
+                        state.jinja = st.text_area("Template", height=40, value=template.jinja)
+
+                        # Submit form
+                        if st.form_submit_button("Save"):
+                            if (
+                                updated_template_name in dataset_templates.all_template_names
+                                and updated_template_name != state.template_name
+                            ):
+                                st.error(
+                                    f"A prompt with the name {updated_template_name} already exists "
+                                    f"for dataset {state.templates_key}."
+                                )
+                            elif updated_template_name == "":
+                                st.error("Need to provide a prompt name.")
+                            else:
+                                # Parses state.answer_choices
+                                if state.answer_choices == "":
+                                    updated_answer_choices = None
+                                else:
+                                    updated_answer_choices = state.answer_choices
+
+                                dataset_templates.update_template(
+                                    state.template_name,
+                                    updated_template_name,
+                                    state.jinja,
+                                    state.reference,
+                                    state.metadata,
+                                    updated_answer_choices,
+                                )
+                                # Update the state as well
+                                state.template_name = updated_template_name
+            #
+            # Displays template output on current example if a template is selected
+            # (in second column)
+            #
+            with col2:
+                if state.template_name is not None:
+                    st.empty()
+                    template = dataset_templates[state.template_name]
+                    prompt = template.apply(example)
+                    if prompt == [""]:
+                        st.write("∅∅∅ *Blank result*")
+                    else:
+                        st.write("Input")
+                        show_text(prompt[0], width=40)
+                        if len(prompt) > 1:
+                            st.write("Target")
+                            show_text(prompt[1], width=40)
+
+
+#
+# Must sync state at end
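+# (state is a custom session-state shim; sync() persists values across Streamlit reruns)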
+#
+state.sync()
diff --git a/promptsource/seqio_tasks/__init__.py b/promptsource/seqio_tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3ba7243093e678df937cfb3cd240aed41bd28a8
--- /dev/null
+++ b/promptsource/seqio_tasks/__init__.py
@@ -0,0 +1,3 @@
+"""Tools for loading prompted tasks in seqio."""
+
+from . import tasks, utils
diff --git a/promptsource/seqio_tasks/dataset_subset_template.csv b/promptsource/seqio_tasks/dataset_subset_template.csv
new file mode 100644
index 0000000000000000000000000000000000000000..0358d5202200a16a8f9fe043f3f7fa8aa834269a
--- /dev/null
+++ b/promptsource/seqio_tasks/dataset_subset_template.csv
@@ -0,0 +1,445 @@
+comment,do_eval,skip_train,dataset_subset_template,nontrivial_choices_given,nontrivial_choices_hidden,trivial_choices_given,trivial_choices_hidden,generative_non_true_task,generative_non_true_implausible,generative_true_task,negated_answers,counting,non_true_task_other,awkward_phrasing,ungrammatical,template_bug,long_distance,no_sep_2_sentences,verbose,answer_span_indices,non_natural_language
+,,,adversarial_qa_dbert_adversarial_qa_dbert_1,,,,,,,,,,,,,,,,,,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_10,,,,,,,,,,,,,,,,,True,True
+,,,adversarial_qa_dbert_adversarial_qa_dbert_2,,,,,,,,,,,,,,True,,,,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_3,,,,,,,,,,,,,,,,,,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_4,,,,,True,,,,,,,,,,,,,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_5,,,,,True,,,,,,,,,,,,,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_6,,,,,,,,,,,,,,,,True,,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_7,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_8,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_dbert_adversarial_qa_dbert_9,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_1,,,,,,,,,,,,,,,,,,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_10,,,,,,,,,,,,,,,,,True,True
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_2,,,,,,,,,,,,,,True,,,,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_3,,,,,,,,,,,,,,,,,,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_4,,,,,True,,,,,,,,,,,,,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_5,,,,,True,,,,,,,,,,,,,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_6,,,,,,,,,,,,,,,,True,,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_7,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_8,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_dbidaf_adversarial_qa_dbidaf_9,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_1,,,,,,,,,,,,,,,,,,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_10,,,,,,,,,,,,,,,,,True,True
+,,,adversarial_qa_droberta_adversarial_qa_droberta_2,,,,,,,,,,,,,,True,,,,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_3,,,,,,,,,,,,,,,,,,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_4,,,,,True,,,,,,,,,,,,,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_5,,,,,True,,,,,,,,,,,,,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_6,,,,,,,,,,,,,,,,True,,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_7,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_8,,,,,,,,,,,,,,,,,True,
+,,,adversarial_qa_droberta_adversarial_qa_droberta_9,,,,,,,,,,,,,,,,,True,
+,,,ag_news_classify,,True,,,,,,,,,,,,,,,,
+,,,ag_news_classify_with_choices,True,,,,,,,,,,,,,,,,,
+,,,ag_news_recommend,True,,,,,,,,,,,,,,,,,
+,,,ag_news_which_section,,True,,,,,,,,,,,,,,,,
+,,,ag_news_which_section_choices,True,,,,,,,,,,,,,,,,,
+,,,amazon_polarity_Template_1,,,True,,,,,,,,,,,,,,,
+,,,amazon_polarity_Template_2,,,,True,,,,,,,,,,True,,,,
+,,,amazon_polarity_Template_3,,,,True,,,,,,,,,,,,,,
+,,,amazon_polarity_Template_4,,,,True,,,,,,,,,,True,,,,
+,,,amazon_polarity_Template_5,,,True,,,,,,,,,,,,,,,
+,,,amazon_polarity_Template_6,,,True,,,,,,,,,,,True,,,,
+,True,True,anli_GPT_3_style_r1,True,,,,,,,,,,,,,,,,,
+,True,True,anli_based_on_the_previous_passage_r1,True,,,,,,,,,,,,,,,,,
+,True,True,anli_does_S1_contradict_S2__r1,,,,,,,,True,,True,,,,,,,,
+,True,True,anli_does_S1_entail_S2__r1,True,,,,,,,,,,,,,,,,,
+,True,True,anli_given_does_it_follow_that__r1,True,,,,,,,,,,,,,,,,,
+,True,True,anli_given_it_must_be_true_that__r1,True,,,,,,,,,,,,,,,,,
+,True,True,anli_GPT_3_style_r2,True,,,,,,,,,,,,,,,,,
+,True,True,anli_based_on_the_previous_passage_r2,True,,,,,,,,,,,,,,,,,
+,True,True,anli_does_S1_contradict_S2__r2,,,,,,,,True,,True,,,,,,,,
+,True,True,anli_does_S1_entail_S2__r2,True,,,,,,,,,,,,,,,,,
+,True,True,anli_given_does_it_follow_that__r2,True,,,,,,,,,,,,,,,,,
+,True,True,anli_given_it_must_be_true_that__r2,True,,,,,,,,,,,,,,,,,
+,True,True,anli_GPT_3_style_r3,True,,,,,,,,,,,,,,,,,
+,True,True,anli_based_on_the_previous_passage_r3,True,,,,,,,,,,,,,,,,,
+,True,True,anli_does_S1_contradict_S2__r3,,,,,,,,True,,True,,,,,,,,
+,True,True,anli_does_S1_entail_S2__r3,True,,,,,,,,,,,,,,,,,
+,True,True,anli_given_does_it_follow_that__r3,True,,,,,,,,,,,,,,,,,
+,True,True,anli_given_it_must_be_true_that__r3,True,,,,,,,,,,,,,,,,,
+,,,app_reviews_categorize_rating_using_review,,True,,,,,,,,,,,,,,,,
+,,,app_reviews_convert_to_rating,True,,,,,,,,,,,,,,,,,
+,,,app_reviews_convert_to_star_rating,,,,,,,,,,True,,,,,,,,
+,,,app_reviews_generate_review,,,,,True,True,,,,,,,,,,,,
+,,,ai2_arc_ARC_Challenge_answer_qn,,,,,True,True,,,,,,,,,,,,
+,,,ai2_arc_ARC_Challenge_false,,,,,,,,True,,,,,,,,,,
+,,,ai2_arc_ARC_Challenge_qa_options,True,,,,,,,,,,,,,,,,,
+,,,ai2_arc_ARC_Challenge_test,True,,,,,,,,,,,,,,,,,
+,,,ai2_arc_ARC_Easy_answer_qn,,,,,True,True,,,,,,,,,,,,
+,,,ai2_arc_ARC_Easy_false,,,,,,,,True,,,,,,,,,,
+,,,ai2_arc_ARC_Easy_qa_options,True,,,,,,,,,,,,,,,,,
+,,,ai2_arc_ARC_Easy_test,True,,,,,,,,,,,,,,,,,
+,True,,circa_goldstandard1_judgement,True,,,,,,,,,,True,,,,,,,
+,True,,circa_goldstandard2_judgement,True,,,,,,,,,,True,,,,,,,
+,,,circa_judgement,,True,,,,,,,,True,True,,,,,,,
+,,,circa_possible_qn,,,,,True,,,,,,,,,,,,,
+,,,circa_question_declarative,,,,,,,,,,True,,,,,,,,
+,,,cnn_dailymail_3.0.0_generate_story,,,,,True,,,,,,,,,,,,,
+,,,cnn_dailymail_3.0.0_news_card_view,,,,,,,True,,,,,,,True,,,,
+,,,cnn_dailymail_3.0.0_news_stock,,,,,,,True,,,,,,,True,,,,
+,,,cnn_dailymail_3.0.0_news_summary,,,,,,,True,,,,,,,True,,True,,
+,,,cnn_dailymail_3.0.0_spice_up_story,,,,,True,,,,,,,,,,,,,
+,,,codah_codah_answer_no_option,,True,,,,,,,,,,,,,,,,
+,,,codah_codah_answer_with_option,True,,,,,,,,,,,,,,,,,
+,,,codah_codah_answer_with_option_idx,True,,,,,,,,,,,,,,,,,
+,,,codah_codah_answer_with_option_post,True,,,,,,,,,,,,,,,,,
+,,,codah_codah_choose_from_list,True,,,,,,,,,,,,,,,,,
+,,,codah_codah_finish_from_the_list,True,,,,,,,,,,,,,,,,,
+,,,codah_codah_finish_from_the_list_post,True,,,,,,,,,,,,,,,,,
+,,,codah_codah_finish_pre,,True,,,,,,,,,,,,,,,,
+,,,codah_codah_question_category,,,,,,,,,,True,,,,,,,,
+,,,codah_codah_question_category_bis,,,,,,,,,,True,,,,,,,,
+,,,common_gen_Example_prompt,,,,,,,True,,,,,,,,,,,
+,,,common_gen_Given_concepts,,,,,,,True,,,,,,,,,,,
+,,,common_gen_Put_together,,,,,,,True,,,,,,,,,,,
+,,,common_gen_choice_in_concept_centric_sentence_generation,,,,,,,True,,,,,,,,,,,
+,,,common_gen_sentence_to_concepts,,,,,,,,,,True,,,,,,,,
+,,,cos_e_v1.11_description_question_option_id,True,,,,,,,,,,,,,,,,,
+,,,cos_e_v1.11_description_question_option_text,True,,,,,,,,,,,,,,,,,
+,,,cos_e_v1.11_generate_explanation_given_text,True,,,,,,True,,,,,,True,,,,,
+,,,cos_e_v1.11_generate_explanation_no_given_answer,,True,,,,,True,,,,,,,,,,,
+,,,cos_e_v1.11_question_description_option_id,True,,,,,,,,,,,,,,,,,
+,,,cos_e_v1.11_question_description_option_text,True,,,,,,,,,,,,,,,,,
+,,,cos_e_v1.11_question_option_description_id,True,,,,,,,,,,,,,,,,,
+,,,cos_e_v1.11_question_option_description_text,True,,,,,,,,,,,,,,,,,
+revisit,,,cosmos_qa_context_description_question_answer_id,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_description_question_answer_text,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_description_question_text,,True,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_question_answer_description_id,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_question_answer_description_text,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_question_description_answer_id,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_question_description_answer_text,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_context_question_description_text,,True,,,,,,,,,,,,,,,,
+,,,cosmos_qa_description_context_question_answer_id,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_description_context_question_answer_text,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_description_context_question_text,,True,,,,,,,,,,,,,,,,
+,,,cosmos_qa_no_prompt_id,True,,,,,,,,,,,,,,,,,
+,,,cosmos_qa_no_prompt_text,True,,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_1,,True,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_10,True,,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_3,,True,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_5,,True,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_7,,True,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_8,,True,,,,,,,,,,,,,,,,
+,,,dbpedia_14_dbpedia_9,True,,,,,,,,,,,,,,,,,
+,,,dream_answer_to_dialogue,,,,,True,,,,,,,,,,,,,
+,,,dream_baseline,True,,,,,,,,,,,,,,,,,
+,,,dream_conversation,True,,,,,,,,,,,,,,,,,
+,,,dream_generate_first_utterance,,,,,True,,,,,,,,,,,,,
+,,,dream_generate_last_utterance,,,,,True,,,,,,,,,,,,,
+,True,,emo_feeling,True,,,,,,,,,,,,,,,,,
+,True,,emo_final_message,True,,,,,,,,,,,,,,,,,
+,True,,emo_persons_describe,True,,,,,,,,,,,,,,,True,,
+,True,,emo_persons_infer,True,,,,,,,,,,,,,,,,,
+,True,,emo_spoke_last,True,,,,,,,,,,,,,,,,,
+,,,freebase_qa_inference_chain_prompt,,,,,,,,,,True,,,,,,,,
+,,,freebase_qa_inference_chain_prompt_context,,,,,,,,,,True,,,,,,,,
+,,,freebase_qa_qa_context_1,,,,,,,,,,,,,,,,,,
+,,,freebase_qa_qa_context_2,,,,,,,,,,,,,,,,,,
+,,,freebase_qa_qa_template_basic,,,,,,,,,,,,,,,,,,
+,,,gigaword_Document_,,,,,,,True,,,,,,,,,,,
+,,,gigaword_Summarize_this_document_,,,,,,,True,,,,,,,,,,,
+,,,gigaword_TLDR,,,,,,,True,,,,,,,,,,,
+,,,gigaword_generate_summary_for_this,,,,,,,True,,,,,,,,,,,
+,,,gigaword_in_a_nutshell,,,,,,,True,,,,,,,,,,,
+,,,gigaword_reverse_writing,,,,,,,,,,True,,,,,,,,
+,,,gigaword_reverse_writing_2,,,,,,,True,,,,,,,,,,,
+,,,gigaword_summarize_,,,,,,,True,,,,,,,,,,,
+,,,gigaword_write_one_sentence,,,,,,,True,,,,,,,,,,,
+,True,True,glue_cola_Following_sentence_acceptable,True,,,,,,,,,,,,,,,,,
+,True,True,glue_cola_Make_sense_yes_no,,,True,,,,,,,,,,,,,,,
+,True,True,glue_cola_Previous_sentence_acceptable,,,,True,,,,,,,,,,,,,,
+,True,True,glue_cola_editing,,,True,,,,,,,,,,,,,,,
+,True,True,glue_cola_jinja_example,,,,True,,,,,,,,,,,,,,
+,True,,glue_mrpc_equivalent,True,,,,,,,,,,,,,,True,,,
+,True,,glue_mrpc_paraphrase,,,,True,,,,,,,,,,,,,,
+,True,,glue_mrpc_replace,,,,True,,,,,,,,,,,,,,
+,True,,glue_mrpc_same_thing,,,,True,,,,,,,,,,,True,,,
+,True,,glue_mrpc_want_to_know,,,,True,,,,,,,,,,,True,,,
+,,,glue_qqp_answer,,,,True,,,,,,,,,,,,,,
+,,,glue_qqp_duplicate,,,,True,,,,,,,,,,,,,,
+,,,glue_qqp_duplicate_or_not,True,,,,,,,,,,,,,,,,,
+,,,glue_qqp_quora,,,,True,,,,,,,,,,,,True,,
+,,,glue_qqp_same_thing,,,,True,,,,,,,,,,,,,,
+,,,glue_sst2_following_positive_negative,True,,,,,,,,,,,,,,,,,
+,,,glue_sst2_happy_or_mad,True,,,,,,,,,,,,,,,,,
+,,,glue_sst2_positive_negative_after,True,,,,,,,,,,,,,,,,,
+,,,glue_sst2_review,True,,,,,,,,,,,,,,,,,
+,,,glue_sst2_said,True,,,,,,,,,,,,,,,,,
+,,True,glue_stsb_examples,,,,,,,,,,,,,,,,,,
+,,True,glue_stsb_rank,,,,,,,,,,,,,,,,,,
+,,True,glue_stsb_rate,,,,,,,,,,,,,,,,,,
+,,True,glue_stsb_score,,,,,,,,,,,,,,,,,,
+,,True,glue_stsb_similarity,,,,,,,,,,,,,,,,,,
+,True,True,hans_GPT_3_style,True,,,,,,,,,,,,,,,,,
+,True,True,hans_Suppose_Can_we_infer_that_,,,,True,,,,,,,,,,,,,,
+,True,True,hans_based_on_the_previous_passage,,,,True,,,,,,,,,,,,,,
+,True,True,hans_does_S1_entail_S2_,,,True,,,,,,,,,,,,,,,
+,True,True,hans_given_does_it_follow_that_,,,True,,,,,,,,,,,,,,,
+,True,True,hans__does_the_previous_passage_support_the_claim_that,,,,True,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_0,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_1,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_2,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_3,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_reversed_0,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_reversed_1,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_reversed_2,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_YesNo_reversed_3,,,True,,,,,,,,,,,,,,,
+,,,hellaswag_complete_first_then,True,,,,,,,,,,,,,,,,,
+,,,hellaswag_first_then,True,,,,,,,,,,,,,,,,,
+,,,hellaswag_how_ends,True,,,,,,,,,,,,,,,,,
+,,,hellaswag_if_begins_how_continues,True,,,,,,,,,,,,,,,,,
+,,,hellaswag_which_ending,True,,,,,,,,,,,,,,,,,
+,,,imdb_imdb_1,,True,,,,,,,,,,,,,,,,
+,,,imdb_imdb_2,,True,,,,,,True,,,,,,,,,,
+,,,imdb_imdb_3,,True,,,,,,,,,,,,,,,,
+,,,imdb_imdb_4,,True,,,,,,,,,,,,,,,,
+,,,imdb_imdb_5,,True,,,,,,,,,,,,True,,,,
+,,,imdb_imdb_6,,True,,,,,,,,,,,,,,,,
+,,,imdb_imdb_7,,True,,,,,,,,,,,,,,,,
+,,,imdb_imdb_8,,True,,,,,,,,,,,,,,,,
+,,,imdb_imdb_9,,,,True,,,,,,,,,,,,,,
+,True,,mc_taco_mc_taco_1,,,,True,,,,,,,,,,,,,,
+,,,mc_taco_mc_taco_2,,,,,,,,,,True,,,,,,,,
+,True,,mc_taco_mc_taco_3,,,True,,,,,,,,,,,True,,,,
+,,,mc_taco_mc_taco_4,True,,,,,,,,,True,,,,,,,,
+,,,mc_taco_mc_taco_5,,,,,True,,,,,,,,,,,,,
+,,,mc_taco_mc_taco_6,,True,,,,,,,,,,,,,,,,
+,True,True,nq_open_context_self_description,,,,,,,,,,,,,,,,,,
+,,True,nq_open_guess_question,,,,,True,,,,,,,,,,,,,
+,True,True,nq_open_question_answer,,,,,,,,,,,,,,,,,,
+,True,True,nq_open_question_with_instruction,,,,,,,,,,,,,,,,,,
+,,,onestop_english_ara_context,True,,,,,,,,,,,,,,,,,
+,,,onestop_english_assess,True,,,,,,,,,,,,,True,,,,
+,,,onestop_english_ats,True,,,,,,,,,,,,,,,,,
+,,,onestop_english_esl_context,True,,,,,,,,,,,,,True,,,,
+,,,onestop_english_esl_variation,True,,,,,,,,,,,,,True,,,,
+,True,,openbookqa_main_choices,True,,,,,,,,,,,,,,,,,
+,True,,openbookqa_main_choose_an_answer_with_options,True,,,,,,,,,,,,,,,,,
+,True,,openbookqa_main_only_options,True,,,,,,,,,,,,,,,,,
+,True,,openbookqa_main_pick_answer_with_options,True,,,,,,,,,,,,,,,,,
+,True,,openbookqa_main_pick_using_id,True,,,,,,,,,,,,,,,,,
+,True,,openbookqa_main_which_correct,True,,,,,,,,,,,,,,,,,
+,,True,openbookqa_main_which_correct_inverse,True,,,,,,,,,,,,True,,,,,
+,,,paws_labeled_final_Concatenation,,,True,,,,,,,,,,True,,,,,
+,,,paws_labeled_final_Concatenation_no_label,,,,True,,,,,,,,,True,,,,,
+,,,paws_labeled_final_Meaning,,,True,,,,,,,,,,True,,,,,
+,,,paws_labeled_final_Meaning_no_label,,,,True,,,,,,,,,True,,,,,
+,,,paws_labeled_final_PAWS_ANLI_GPT3,True,,,,,,,,,True,,,,,,,,
+,,,paws_labeled_final_PAWS_ANLI_GPT3_no_label,,True,,,,,,,,True,,,,,,,,
+,,,piqa_Correct_the_solution,,,,,True,,,,,,,,,,,,,
+,,,piqa_Correct_the_solution_if_false_from_sol_1,,,,,True,,,,,,,,,,,,,
+,,,piqa_Correct_the_solution_if_false_from_sol_2,,,,,True,,,,,,,,,,,,,
+should use jinja choice,,,piqa_Does_this_solution_make_sense_sol1,,,,True,,,,,,,,,,,,,,
+,,,piqa_Does_this_solution_make_sense_sol2,,,,True,,,,,,,,,,,,,,
+,,,piqa_Generate_a_similar_but_wrong_solution,,,,,True,,,,,,,,,,,,,
+,,,piqa_choose_the_most_appropriate_solution,True,,,,,,,,,,,,,,,,,
+duplicate of above,,True,piqa_choose_the_most_appropriate_solution_reorder_solution,True,,,,,,,,,,,,,,,,,
+,,,piqa_no_prompt_needed,,,,,True,,,,,,,,,,,,,
+,,,qa_srl_aq,,,,,True,True,,,,,,,,,,,,
+,,,qa_srl_context_answer,,,,,True,,,,,,,,,,,,,
+,,,qa_srl_context_qn,,,,,True,,,,,,,,,,,,,
+,,,qa_srl_predicate,,,,,,,,,,True,,,,,,,,
+need non-naive metric,True,,qa_srl_qa,,,,,,,,,,,,,,,,,,
+,,,qasc_is_correct_0,,,,True,,,,,,,,,,,,,,
+,,,qasc_is_correct_1,,,,True,,,,,,,,,,,,,,
+,,,qasc_qu_combined,True,,,,,,,,,,,,,,,,,
+,,,qasc_sep_combined_can_tell,True,,,,,,,,,,,,,,,,,
+,,,qasc_sep_qu,True,,,,,,,,,,,,,,,,,
+,,,quail_context_description_question_answer_id,True,,,,,,,,,,,,,,,,,
+,,,quail_context_description_question_answer_text,True,,,,,,,,,,,,,,,,,
+,,,quail_context_description_question_text,,True,,,,,,,,,,,,,,,,
+,,,quail_context_question_answer_description_id,True,,,,,,,,,,,,,,,,,
+,,,quail_context_question_answer_description_text,True,,,,,,,,,,,,,,,,,
+,,,quail_context_question_description_answer_id,True,,,,,,,,,,,,,,,,,
+,,,quail_context_question_description_answer_text,True,,,,,,,,,,,,,,,,,
+,,,quail_context_question_description_text,True,,,,,,,,,,,,,,,,,
+,,,quail_description_context_question_answer_id,,True,,,,,,,,,,,,,,,,
+,,,quail_description_context_question_answer_text,True,,,,,,,,,,,,,,,,,
+,,,quail_description_context_question_text,,True,,,,,,,,,,,,,,,,
+,,,quail_no_prompt_id,True,,,,,,,,,,,,,,,,,
+,,,quail_no_prompt_text,True,,,,,,,,,,,,,,,,,
+,,,quartz_para_question_1,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,quartz_para_question_1_reverse,True,,,,,,,,,,,,,,,,,
+,,,quartz_para_question_2,True,,,,,,,,,,,,,,,,,
+,,,quartz_para_question_3_choices,True,,,,,,,,,,,,,,,,,
+,,,quartz_para_question_4_choices,True,,,,,,,,,,,,,,,,,
+,,,quartz_para_question_plain,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,quartz_para_question_plain_reverse,True,,,,,,,,,,,,,,,,,
+,,,quartz_question_para_1,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,quartz_question_para_1_reverse,True,,,,,,,,,,,,,,,,,
+,,,quartz_question_para_2,True,,,,,,,,,,,,,,,,,
+,,,quartz_question_para_3,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,quartz_question_para_3_reverse,True,,,,,,,,,,,,,,,,,
+,,,quoref_Template_1,,,,,,,,,,,,,,,,,,
+,,,quoref_Template_2,,,,,,,,,,,,,,True,,,,
+,,,quoref_Template_3,,,,,True,,,,,,True,,,,,,,
+,,,quoref_Template_4,,,,,,,,,,True,,,,,,,True,
+,,,quoref_Template_5,,,,,,,,,,True,,,,,,,,
+,,,race_high_Read_the_article_and_answer_the_question_no_option_,,True,,,,,,,,,,,,,,,,
+,True,,race_high_Read_the_article_and_select_the_best_answer,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,race_high_Read_the_article_and_select_the_best_answer2,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,race_high_Read_the_article_and_select_the_best_answer3,True,,,,,,,,,,,,,,,,,
+,,,race_high_Write_a_multi_choice_question_for_the_following_article,,,,,True,,,,,,,,,,,,,
+,,,race_high_Write_a_multi_choice_question_for_the_following_article_2,,,,,True,,,,,,,,,,,,,
+,,,race_middle_Read_the_article_and_answer_the_question_no_option_,,True,,,,,,,,,,,,,,,,
+,True,,race_middle_Read_the_article_and_select_the_best_answer,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,race_middle_Read_the_article_and_select_the_best_answer2,True,,,,,,,,,,,,,,,,,
+near duplicate of the above,,True,race_middle_Read_the_article_and_select_the_best_answer3,True,,,,,,,,,,,,,,,,,
+,,,race_middle_Write_a_multi_choice_question_for_the_following_article,,,,,True,,,,,,,,,,,,,
+,,,race_middle_Write_a_multi_choice_question_for_the_following_article_2,,,,,True,,,,,,,,,,,,,
+,,,ropes_funky_prompt,True,,,,,,,,,,,,,,,,,
+,,,ropes_plain,True,,,,,,,,,,,,,,,,,
+,,,ropes_plain_bottom_hint,True,,,,,,,,,,,,,True,,,,
+,,,ropes_plain_no_background,True,,,,,,,,,True,,,,,,,,
+,,,ropes_prompt_beginning,True,,,,,,,,,,,,,,,,,
+,,,ropes_prompt_bottom_hint_beginning,True,,,,,,,,,,,,,,,,,
+,,,ropes_prompt_bottom_no_hint,True,,,,,,,,,True,,,,,,,,
+,,,ropes_prompt_mix,True,,,,,,,,,,,,,True,,,,
+,,,rotten_tomatoes_rt_1,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_10,True,,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_2,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_3,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_4,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_5,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_6,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_7,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_8,,True,,,,,,,,,,,,,,,,
+,,,rotten_tomatoes_rt_9,,,,True,,,,,,,,,,,,,,
+,,,sciq_Template_0,,True,,,,,,,,,,,True,,,,,
+,,,sciq_Template_1,,True,,,,,,,,,,,True,,,,,
+,True,,social_i_qa_social_i_qa1,True,,,,,,,,,,,,,,,,,
+,,,social_i_qa_social_i_qa2,,True,,,,,,,,,,,,,,,,
+select answer by ordinal word,True,,social_i_qa_social_i_qa3,True,,,,,,,,,,,,,,,,,
+,,,social_i_qa_social_i_qa4,,,,,True,,,,,,,,,,,,,
+4-way to binary classification,,,social_i_qa_social_i_qa5,,,,True,,,,,,,,,,,,,,
+,,,squad_v2_Jeopardy_with_Context,,,,,True,,,,,,,,,,,,,
+,,,squad_v2_Jeopardy_without_Context,,,,,True,,,,,True,,,,,,,,
+,,,squad_v2_Questions_with_Context,True,,,,,,,,,,,,,,,,,
+nicely randomized prompt phrasing,,,squad_v2_Questions_with_Context_Without_Prompt_Keywords,True,,,,,,,,,,,,,,,,,
+,,,squad_v2_Topic_Prediction_Context,,,,,,,,,,True,,,,,,,,
+,,,squad_v2_Topic_Prediction_Context_with_randomized_prompt_options,,,,,,,,,,True,,,,,,,,
+,,,squad_v2_Topic_Prediction_Context_with_randomized_prompt_options_placed_in_the_end,,,,,,,,,,True,,,,,,,,
+,,,squad_v2_Topic_Prediction_Question_and_Answer_Pair,,,,,,,,,,True,,,,,,,,
+,,,squad_v2_Trivia,,,,,,,,,,True,,,,,,,,
+,True,,super_glue_boolq_GPT_3_Style,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_boolq_I_wonder_,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_boolq_based_on_the_following_passage,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_boolq_based_on_the_previous_passage,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_boolq_could_you_tell_me_,,,,True,,,,,,,,,,,,,,
+,True,True,super_glue_cb_GPT_3_style,True,,,,,,,,,,,,,,,,,
+,True,True,super_glue_cb_based_on_the_previous_passage,True,,,,,,,,,,,,,,,,,
+contrapositive,True,True,super_glue_cb_does_S1_contradict_S2_,True,,,,,,,,,True,,,,,,,,
+,True,True,super_glue_cb_does_S1_entail_S2_,True,,,,,,,,,,,,,,,,,
+,True,True,super_glue_cb_given_does_it_follow_that_,True,,,,,,,,,,,,,,,,,
+must/might/may be true,True,True,super_glue_cb_given_it_must_be_true_that_,True,,,,,,,,,,,,,,,,,
+,True,,super_glue_copa_C1_or_C2_premise_so_because_,True,,,,,,,,,,,,,,,,,
+effect examples,True,,super_glue_copa__As_a_result_C1_or_C2_,True,,,,,,,,,,,,,,,,,
+effect examples,True,,super_glue_copa__What_could_happen_next_C1_or_C2_,True,,,,,,,,,,,,,,,,,
+cause examples,True,,super_glue_copa__which_may_be_caused_by,True,,,,,,,,,,,,,,,,,
+effect examples,True,,super_glue_copa__which_may_cause_C1_or_C2_,True,,,,,,,,,,,,,,,,,
+cause examples,True,,super_glue_copa__why_C1_or_C2,True,,,,,,,,,,,,,,,,,
+,True,,super_glue_multirc_I_was_going_to_say_,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_multirc_Would_it_be_good_to_answer_,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_multirc_is_a_correct_answer_,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_multirc_is_the_correct_answer_,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_multirc_paragraph_question_is_it_,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_record_Can_you_figure_out_,,True,,,,,,,,,,,,,,,,
+,True,,super_glue_record_In_the_question_above_the_placeholder_stands_for,,True,,,,,,,,,,,,,,,,
+,True,,super_glue_record_What_could_the_placeholder_be_,True,,,,,,,,,,,,,,,,,
+no difference here?,True,,super_glue_record_Which_one_is_the_placeholder_,True,,,,,,,,,,,,,,,,,
+,True,,super_glue_record_the_placeholder_refers_to_,,True,,,,,,,,,,,,,,,,
+,True,True,super_glue_rte_GPT_3_style,True,,,,,,,,,,,,,,,,,
+,True,True,super_glue_rte_Suppose_Can_we_infer_that_,,,,True,,,,,,,,,,,,,,
+,True,True,super_glue_rte_based_on_the_previous_passage,,,,True,,,,,,,,,,,,,,
+,True,True,super_glue_rte_does_S1_entail_S2_,,,True,,,,,,,,,,,,,,,
+,True,True,super_glue_rte_given_does_it_follow_that_,,,,True,,,,,,,,,,,,,,
+,True,True,super_glue_rte__Therefore_we_re_licensed_to_say_that_,,,,True,,,,,,,,,,,,,,
+,True,True,super_glue_rte__does_the_previous_passage_support_the_claim_that,,,,True,,,,,,,,,,,,,,
+,True,,super_glue_wic_GPT_3_prompt,,,,True,,,,,,,,,,,True,,,
+,True,,super_glue_wic_GPT_3_prompt_with_label,,,True,,,,,,,,,,,,True,,,
+,True,,super_glue_wic_question_context,,,,True,,,,,,,,,,,True,,,
+,True,,super_glue_wic_question_context_meaning,,,,True,,,,,,,,,,,True,,,
+,True,,super_glue_wic_question_context_meaning_with_label,,,True,,,,,,,,,,,,True,,,
+,True,,super_glue_wic_similar_sense,,,,True,,,,,,,,,,,True,,,
+,True,,super_glue_wsc.fixed_Here_p_stands_for_,,,,,,,,,,,,,,,,,,
+,True,,super_glue_wsc.fixed_In_the_previous_sentence_the_pronoun_refers_to_,,,,,,,,,,,,,,,,,,
+,True,,super_glue_wsc.fixed_Who_is_are_,,,,,,,,,,,,,,,,,,
+,True,,super_glue_wsc.fixed_in_the_passage_above_the_pronoun_X_refers_to_,,,,,,,,,,,,,,,,,,
+,True,,super_glue_wsc.fixed_passage_what_does_the_pronoun_refer_to_,,,,,,,,,,,,,,,,,,
+cast 4-way classification as binary,,,swag_regular_YesNo_0,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_1,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_2,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_3,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_reversed_0,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_reversed_1,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_reversed_2,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_YesNo_reversed_3,,,True,,,,,,,,,,,,,,,
+,,,swag_regular_complete_first_then,True,,,,,,,,,,,,,,,,,
+,,,swag_regular_first_then,True,,,,,,,,,,,,,,,,,
+,,,swag_regular_how_ends,True,,,,,,,,,,,,,,,,,
+,,,swag_regular_if_begins_how_continues,True,,,,,,,,,,,,,,,,,
+,,,swag_regular_which_ending,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_ABBR,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_ABBR_context_first,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_DESC,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_DESC_context_first,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_ENTY,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_ENTY_context_first,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_HUM,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_HUM_context_first,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_LOC,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_LOC_context_first,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_NUM,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_NUM_context_first,True,,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_open,,True,,,,,,,,,,,,,,,,
+,,,trec_fine_grained_open_context_first,,True,,,,,,,,,,,,,,,,
+answers are not what the questions ask for,,True,trec_gao_et_al_1,,,,,,,,,,,,True,,,,,,
+answers are not what the questions ask for,,True,trec_gao_et_al_2,,,,,,,,,,,,True,,,,,,
+,,,trec_trec1,True,,,,,,,,,,,,,,,,,
+,,,trec_trec2,True,,,,,,,,,,,,,,,,,
+,,,trivia_qa_rc_context_self_description,,,,,,,,,,,,,,,,,,
+,,,trivia_qa_rc_guess_question,,,,,True,True,,,,True,,,,,,,,
+,,,trivia_qa_rc_question_answer,,,,,,,,,,,,,,,,,,
+,,,trivia_qa_rc_question_with_instruction,,,,,,,,,,,,,,,,,,
+,,,trivia_qa_rc_reading_comprehension_1,,,,,,,,,,True,,,,,,,,
+,,,trivia_qa_rc_reading_comprehension_2,,,,,,,,,,True,,,,,,,,
+,,,web_questions_count_answers,,,,,,,,,True,,,,,,,,,
+,,,web_questions_credible_question,,,,,True,,,,,,,,,,,,,
+,,,web_questions_if_answers_what_question,,,,,True,,,,,,,,,,,,,
+,,,web_questions_potential_correct_answer,,,,,,,,,,,True,,,,,,,
+,,,web_questions_question_answer,,,,,,,,,,,,,,,,,,
+,,,web_questions_suggest_question,,,,,True,,,,,,,,,,,,,
+,,,wiki_bio_comprehension,,,,,,,,,,True,,,,,,,,
+,,,wiki_bio_guess_person,,,,,,,,,,True,,,,,,,,
+,,,wiki_bio_key_content,,,,,,,,,,True,,,,,,,,
+,,,wiki_bio_what_content,,,,,,,,,,True,,,,,,,,
+"should rephrase ""summarize""",,,wiki_bio_who,,,,,,,,,,,,,,,,,,
+,,,wiki_hop_original_Choose_Best_Object_Candidate,,,,,,,,,,True,,,,,,,,True
+,,,wiki_hop_original_Explain_Relation,,True,,,,,,,,True,,,,,,,,
+,,,wiki_hop_original_Generate_Fact_Triple,,,,,,,,,,True,,,,,,,,True
+,,,wiki_hop_original_Generate_Object_Answer,,,,,,,,,,True,,,,,,,,True
+,,,wiki_hop_original_Generate_Subject_Answer,,,,,,,,,,True,,,,,,,,True
+,,,wiki_hop_original_Indirect_Question_about_Birthplace_Citizenship_Place_of_Death,,,,,,,,,,,,,True,,,,,
+,,,wiqa_effect_with_label_answer,True,,,,,,,,,,,,,,,,,
+,,,wiqa_effect_with_string_answer,True,,,,,,,,,,,,,,,,,
+,,,wiqa_impacting_the_process,,,,True,,,,,,,,,,,,,,
+,,,wiqa_question_type,,,,,,,,,,True,,,,,,,,
+,,,wiqa_remove_first_step,,,,,,,,,,True,,,,,,,,
+,,,wiqa_remove_first_step_bis,,,,,,,,,,True,,,,,,,,
+,,,wiqa_remove_last_step,,,,,,,,,,True,,,,,,,,
+,,,wiqa_remove_last_step_bis,,,,,,,,,,True,,,,,,,,
+,True,,xsum_Document_,,,,,,,,,,,,,,,,,,
+,True,,xsum_Summarize_this_document_,,,,,,,,,,,,,,,,,,
+,True,,xsum_TLDR,,,,,,,,,,,,,,,,,,
+,True,,xsum_generate_summary_for_this,,,,,,,,,,,,,,,,,,
+,True,,xsum_summarize_,,,,,,,,,,,,,,True,,,,
+,True,,xsum_write_one_sentence,,,,,,,,,,,,,,,,,,
+,,,yelp_review_full_based_on_that,,True,,,,,,,,,,,,,,,,
+,,,yelp_review_full_format_rating,,True,,,,,,,,,,,,,,,,
+,,,yelp_review_full_format_score,,True,,,,,,,,,,,,,,,,
+,,,yelp_review_full_format_star,,True,,,,,,,,,,,,,,,,
+,,,yelp_review_full_on_a_scale,,True,,,,,,,,,,,,,,,,
+,,,yelp_review_full_so_i_would,,True,,,,,,,,,,,,,,,,
+,,,yelp_review_full_this_place,,True,,,,,,,,,,,,,,,,
diff --git a/promptsource/seqio_tasks/experiment_D4.csv b/promptsource/seqio_tasks/experiment_D4.csv
new file mode 100644
index 0000000000000000000000000000000000000000..71c8216cc0ab36a7c55625e3754d83e566a96468
--- /dev/null
+++ b/promptsource/seqio_tasks/experiment_D4.csv
@@ -0,0 +1,242 @@
+HF_name,subset,task_by_convention,format,comment,seed_paper,september_check,do_train,do_eval,train_size,adjusted_train_size,D3_do_train,D3_do_eval,D3_adjusted_train_size,metric,multiple correct answer,Paper link,non_linguistic_knowledge,skip,Imported Task Name,imported category,input_length,_human_skill,Domain,Reference
+crows_pairs,,bias_and_fairness,,test set only; authors themselves acknowledge some problems,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+jigsaw_toxicity_pred,,bias_and_fairness,,current https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data ; want https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+super_glue,axg,bias_and_fairness,cls,test set only,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+winogender,,bias_and_fairness,cls,also as axg in super_glue,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+wino_bias,type1_anti,bias_and_fairness,cls,,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+wino_bias,type2_anti,bias_and_fairness,cls,,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+wino_bias,type1_pro,bias_and_fairness,cls,,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+wino_bias,type2_pro,bias_and_fairness,cls,,Eval WG,,,TRUE,,,,,,,,,,,,,,,,
+super_glue,wsc.fixed,coreference,cls,,,,,TRUE,554,0,TRUE,TRUE,554,accuracy,,https://arxiv.org/pdf/1905.00537.pdf,,,superglue-wsc,cls/other,single sentence,knowledge-? reading comprehension,,Levesque et al. 2012
+winograd_wsc,wsc273,coreference,ext,,GPT,,,TRUE,0,0,,,0,accuracy,,https://www.aaai.org/ocs/index.php/KR/KR12/paper/download/4492/4924,,,,,,,,Levesque et al. 2012
+winogrande,winogrande_xl,coreference,ext,,GPT,TRUE,,TRUE,40398,0,,,0,accuracy,,https://arxiv.org/pdf/1907.10641.pdf,,,WinoGrande,qa/multiple-choice qa,,knowledge-? reading comprehension,,Sakaguchi et al. 2020
+winogrande,winogrande_debiased,coreference,ext,"""debiased"" = adversarially filtered",GPT,TRUE,,TRUE,9248,0,,,0,accuracy,,https://arxiv.org/pdf/1907.10641.pdf,,,WinoGrande,qa/multiple-choice qa,,knowledge-? reading comprehension,,Sakaguchi et al. 2020
+glue,cola,grammatical_acceptability,cls,includes semantic acceptability too; to be replaced by blimp,,,,TRUE,8551,0,,TRUE,0,accuracy;matthews_corrcoef,,https://arxiv.org/pdf/1805.12471.pdf,,,glue-cola,cls/other,single sentence,,,Warstadt et al. 2019
+super_glue,cb,NLI,cls,"""for multi-class F1 we compute the unweighted average of the F1 per class.""",,TRUE,,TRUE,250,0,,TRUE,0,mean_multiclass_f1;accuracy,,https://semanticsarchive.net/Archive/Tg3ZGI2M/Marneffe.pdf,,,superglue-cb,cls/nli,sentence pair,knowledge-neutral inference,,de Marneffe et al. 2019
+super_glue,rte,NLI,cls,,,TRUE,,TRUE,2490,0,,TRUE,0,accuracy,,https://arxiv.org/pdf/1905.00537.pdf,,,superglue-rte,cls/nli,sentence pair,knowledge modest inference,,Dagan et al. 2005; Bar-Haim et al. 2006 Giampiccolo et al. 2007; Bentivogli et al. 2009
+anli,,NLI,cls,"In addition to accuracy, paper also evaluates on range of relaxed/strict and matched/unmatched settings and reports F scores for different answers",,,,TRUE,162865,0,,TRUE,0,accuracy,,https://arxiv.org/abs/1910.14599,,,anli,cls/nli,sentence pair,knowledge modest inference,,Nie et al. 2020
+hans,,NLI,cls,,,TRUE,,TRUE,0,0,,TRUE,0,accuracy,,https://arxiv.org/pdf/1902.01007.pdf,,,,,sentence pair,syntax?,,McCoy et al. 2019
+super_glue,axb,NLI,cls,test set only,,TRUE,,TRUE,0,0,,,,,,,,,,,,,,
+glue,mrpc,paraphrase,cls,,,,TRUE,TRUE,3668,3668,TRUE,TRUE,3668,accuracy;f1_score,,https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/I05-50025B15D.pdf,,,glue-mrpc,cls/paraphrase,,paraphrase,,Dolan and Brockett 2005
+glue,qqp,paraphrase,cls,,,,TRUE,TRUE,363846,363846,TRUE,,363846,accuracy;f1_score,,https://aclanthology.org/I05-5002.pdf,,,glue-qqp,cls/paraphrase,,,,(link)
+paws,labeled_final,paraphrase,cls,,,,TRUE,,49401,49401,TRUE,,49401,,,,,,paws,cls/paraphrase,,,,Zhang et al. 2019
+ai2_arc,ARC-Challenge,QA_closed_book,cls,,GPT,,,TRUE,1119,0,TRUE,,1119,"accuracy_with_tie: For each question, a system receives 1 point if it
+chooses the correct answer and 1/k if it reports a k-way tie
+(i.e., chooses multiple answers) that includes the correct answer.",,https://arxiv.org/pdf/1803.05457.pdf,mid-intensive,,ARC (chal.),qa/multiple-choice qa,,nontrivial_comprehension,,Clark et al. 2018
+ai2_arc,ARC-Easy,QA_closed_book,cls,,GPT,,,TRUE,2251,0,TRUE,,2251,"accuracy_with_tie: For each question, a system receives 1 point if it
+chooses the correct answer and 1/k if it reports a k-way tie
+(i.e., chooses multiple answers) that includes the correct answer.",,https://arxiv.org/pdf/1803.05457.pdf,mid-intensive,,ARC (easy),Multiple choice,,,,
+nq_open,,QA_closed_book,gen,,GPT,TRUE,,TRUE,87925,0,,TRUE,0,kilt-exact_match;average_accuracy_across_answers,TRUE,https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00276/43518/Natural-Questions-A-Benchmark-for-Question,intensive,,Natural Questions (open domain),,,trivia,,
+kilt_tasks,hotpotqa,QA_closed_book,gen,recast as closed-book due to input length,self,,TRUE,,88869,88869,,,,,,,,,kilt hotpotqa,qa/closed-book qa,,encyclopedia; multi-hop QA,,Yang et al. 2018
+trivia_qa,unfiltered,QA_closed_book,gen,,GPT,TRUE,,TRUE,87622,0,TRUE,,87622,exact_match;f1_over_words => wikipedia aliases are considered valid answers,TRUE,https://arxiv.org/pdf/1705.03551.pdf,intensive,,Trivia QA,,,,,
+web_questions,,QA_closed_book,gen,"""supposed to be answerable by Freebase"" Check corpora deduplication with freebaseqa.",GPT,,,TRUE,3778,0,TRUE,,3778,accuracy: they don't mention how they normalize across multiple correct answers,TRUE,https://aclanthology.org/D13-1160.pdf,intensive,,web questions,qa/closed-book qa,,,,Berant et al. 2013
+wiki_qa,,QA_closed_book,cls,,CrossFit,,TRUE,,20360,20360,,,,,,https://aclanthology.org/D15-1237.pdf,,,wiki qa,cls/other,,,,Yang et al. 2015
+adversarial_qa,dbidaf,QA_extractive,ext,,,TRUE,TRUE,,10000,10000,TRUE,,10000,,,https://aclanthology.org/2020.tacl-1.43/,,,adversarialqa,qa/machine reading comprehension,,,,Bartolo et al. 2020
+adversarial_qa,dbert,QA_extractive,ext,,,TRUE,TRUE,,10000,10000,TRUE,,10000,,,,,,,,,,,
+adversarial_qa,droberta,QA_extractive,ext,,,TRUE,TRUE,,10000,10000,TRUE,,10000,,,,,,,,,,,
+coqa,,QA_extractive,ext,GPT-easy,GPT,,,TRUE,7199,,,,,"macro_average_f1: for computing a model’s performance, each individual prediction is compared
+against n human answers resulting in n F1 scores,
+the maximum of which is chosen as the prediction’s
+F1. For each question, we average out F1 across
+these n sets, both for humans and models. In our
+final evaluation, we use n = 4 human answers for
+every question (the original answer and 3 additionally collected answers). The articles a, an and the
+and punctuations are excluded in evaluation.",from the paper it seems it could contain multiple answers but the datasets has only one answer per question,https://arxiv.org/pdf/1808.07042.pdf,,,,,,,,
+duorc,SelfRC,QA_extractive,ext,,TaskEmbed;CrossFit,,TRUE,,60721,60721,,,,,,https://duorc.github.io/,,,DuoRC,qa/machine reading comprehension,,,Wikipedia/IMDB crowd,Saha et al. 2018
+duorc,ParaphraseRC,QA_extractive,ext,,TaskEmbed;CrossFit,,TRUE,,69524,69524,,,,,,https://arxiv.org/pdf/1804.07927.pdf,,,DuoRC,paraphrased QA,,,,Saha et al. 2018
+ropes,,QA_extractive,ext,,,TRUE,TRUE,,10924,10924,TRUE,,10924,,,,modest,,ropes,Extractive QA,,cause_and_effect;nontrivial_comprehension,,Lin et al. 2019
+squad_v2,,QA_extractive,ext,,GPT,,,TRUE,130319,0,TRUE,,130319,exact_match;f1_score,TRUE,https://arxiv.org/pdf/1806.03822.pdf,,,SQuAD 2.0,Extractive QA,,,,Rajpurkar et al. 2018
+super_glue,record,QA_extractive,ext,,,TRUE,,TRUE,100730,0,TRUE,TRUE,100730,max_token_level_f1;exact_match,TRUE,https://arxiv.org/pdf/1810.12885.pdf,,,superglue-record,qa/machine reading comprehension,,knowledge-? reading comprehension,,Zhang et al. 2018
+qa_srl,,QA_extractive,ext,"need non-naive metric (""If the predicted word is contained inside the annotated answer span it is considered a correct prediction.""); v2 not in HF https://aclanthology.org/P18-1191.pdf",Eval WG,,,TRUE,6414,0,TRUE,TRUE,6414,accuracy,TRUE,https://dada.cs.washington.edu/qasrl/#page-top,neutral,,qa srl,other,,semantic role,,He et al. 2015
+quac,,QA_extractive,ext,,GPT,,,TRUE,11567,,,,,"average_maximum_f1;HEQ-Q;HEQ-D:  To make oracle human and system performance comparable,
+given n references, we report the average of the
+maximum F1 computed from each n − 1 subset
+with respect to the heldout reference.",TRUE,https://arxiv.org/pdf/1808.07036.pdf,,,,,,dialogue,,
+quoref,,QA_extractive,ext,,,TRUE,TRUE,,19399,19399,TRUE,,19399,,,https://aclanthology.org/D19-1606.pdf,,,Quoref,Extractive QA,,,,Dasigi et al. 2019
+tydiqa,,QA_extractive,ext,,Eval WG,,TRUE,,9211,9211,,,,,,,,,,,,,,
+drop,,QA_generative,gen,"nontrivial math; try history_690, it's pretty hard even when I have domain knowledge",GPT,TRUE,,TRUE,,,,,,exact_match; macro_average_f1,TRUE,https://aclanthology.org/N19-1246.pdf,,,DROP ,multi-hop quantitative reasoning; Abstractive QA,,numerical,Wikipedia crowd,Dua et al. 2019
+cos_e,v1.11,QA_multiple_choice,cls,"same as commonsense_qa but with (poorly sourced) human explanations; questionable ""commonsense"" lots of world knowledge",Vania,TRUE,TRUE,,9741,9741,TRUE,,9741,,,,,,cos e,other/generate explanation,,,,Rajani et al. 2019
+cosmos_qa,,QA_multiple_choice,cls,,,TRUE,TRUE,,25262,25262,TRUE,,25262,,,,,,cosmos qa,qa/multiple-choice qa,,,,Huang et al. 2019
+dream,,QA_multiple_choice,cls,,,TRUE,TRUE,,6116,6116,TRUE,,6116,,,,,,dream,qa/multiple-choice qa,,,,Sun et al. 2019
+openbookqa,main,QA_multiple_choice,cls,interesting combo of pragmatics + scientific reasoning,GPT,,,TRUE,4957,0,TRUE,TRUE,4957,"accuracy_with_tie : For each question, a system receives 1 point if it
+chooses the correct answer and 1/k if it reports a k-way tie
+(i.e., chooses multiple answers) that includes the correct answer.",,https://aclanthology.org/D18-1260.pdf,modest,,openbookqa,qa/multiple-choice qa,,pragmatics,,Mihaylov et al. 2018
+qasc,,QA_multiple_choice,cls,,,TRUE,TRUE,,8134,8134,TRUE,,8134,,,,given?,,qasc,qa/multiple-choice qa,,,,Khot et al. 2020
+quail,,QA_multiple_choice,cls,,,TRUE,TRUE,,10246,10246,TRUE,,10246,,,,,,quail,qa/multiple-choice qa,,,,Rogers et al. 2020
+quarel,,QA_multiple_choice,cls,,CrossFit,,TRUE,,1941,1941,,,,,,,,,quarel,qa/multiple-choice qa,,logical form,,Tafjord et al. 2019a
+quartz,,QA_multiple_choice,cls,,,TRUE,TRUE,,2696,2696,TRUE,,2696,,,https://aclanthology.org/D19-1608.pdf,given?,,quartz-with knowledge,qa/multiple-choice qa,,,,Tafjord et al. 2019b
+race,high,QA_multiple_choice,cls,GPT-hard,GPT,,,TRUE,62445,0,TRUE,TRUE,62445,accuracy,,https://arxiv.org/pdf/1704.04683.pdf,neutral,,race-high,qa/multiple-choice qa,,knowledge-neutral reading comprehension,,Lai et al. 2017
+race,middle,QA_multiple_choice,cls,"revisit: define as comprehension, paragraph level?",GPT,,,TRUE,25421,0,TRUE,TRUE,25421,accuracy,,https://arxiv.org/pdf/1704.04683.pdf,neutral,,race-middle,qa/multiple-choice qa,,knowledge-neutral reading comprehension,,Lai et al. 2017
+sciq,,QA_multiple_choice,cls,,,TRUE,TRUE,,11679,11679,TRUE,,11679,,,,,,sciq,qa/multiple-choice qa,,,,Welbl et al. 2017
+social_i_qa,,QA_multiple_choice,cls,metric differ by prompt: 4-way classification cast as binary ,,TRUE,TRUE,TRUE,33410,33410,TRUE,TRUE,33410,accuracy,,https://arxiv.org/pdf/1904.09728.pdf,,,SIQA,qa/multiple-choice qa,,cultural knowledge,,Sap et al. 2019
+super_glue,boolq,QA_multiple_choice,cls,,,TRUE,,TRUE,9427,0,TRUE,TRUE,9427,accuracy,,https://arxiv.org/pdf/1905.10044.pdf,neutral?,,superglue-boolq,,,knowledge-? reading comprehension,,
+super_glue,copa,QA_multiple_choice,cls,,,TRUE,,TRUE,400,0,TRUE,TRUE,400,accuracy,,http://commonsensereasoning.org/2011/papers/Roemmele.pdf,modest,,superglue-copa,qa/multiple-choice qa,,causal cognition,,Gordon et al. 2012
+super_glue,multirc,QA_multiple_choice,cls,F1 over all answer options. See paper p. 259 for definition,,TRUE,,TRUE,27243,0,TRUE,TRUE,27243,f1_over_all_options;exact_match,,https://aclanthology.org/N18-1023.pdf,neutral?,,superglue-multirc,qa/multiple-choice qa,,knowledge-? reading comprehension,,Khashabi et al. 2018
+wiki_hop,original,QA_multiple_choice,cls,,,TRUE,TRUE,,43738,43738,TRUE,,43738,,,https://transacl.org/ojs/index.php/tacl/article/viewFile/1325/299,,,WikiHop (Welbl et al. 2018),multi-hop QA,,,Wikipedia KB,
+wiqa,,QA_multiple_choice,cls,,,TRUE,TRUE,,29808,29808,TRUE,,29808,,,,,,wiqa,qa/multiple-choice qa,,cause_and_effect,,Tandon et al. 2019
+circa,,QA_multiple_choice,cls,revisit: problematic prompts,,,,TRUE,34268,0,,TRUE,0,mean_multiclass_f1;accuracy,,https://arxiv.org/pdf/2010.03450.pdf,,,circa,cls/other,,pragmatics,,Louis et al. 2020
+mc_taco,,QA_multiple_choice,cls,no train set; variable number of answer_choices; eval in paper is over set of possible candidates;,,,,TRUE,0,0,,TRUE,0,exact_match; f1_score,,https://arxiv.org/pdf/1909.03065.pdf,,,mc taco,qa/binary,,temporal cognition,,Zhou et al. 2019
+piqa,,QA_multiple_choice,cls,revisit: not just other,GPT,,,TRUE,16113,0,TRUE,,16113,accuracy,,https://arxiv.org/pdf/1911.11641.pdf,,,PIQA,Multiple choice,,physical_cognition,,Bisk et al. 2020
+amazon_polarity,,sentiment,cls,,,TRUE,TRUE,,3600000,500000,TRUE,,500000,,,https://cs.stanford.edu/people/jure/pubs/reviews-recsys13.pdf,,,amazon polarity,cls/sentiment analysis,,,,McAuley and Leskovec 2013
+app_reviews,,sentiment,cls,,,TRUE,TRUE,,288065,288065,TRUE,,288065,,,,,,app reviews,other/regression,,,,Missing
+imdb,,sentiment,cls,,,TRUE,TRUE,,25000,25000,TRUE,,25000,,,,,,imdb,cls/sentiment analysis,,no dev set,,Maas et al. 2011
+rotten_tomatoes,,sentiment,cls,,,TRUE,TRUE,,8530,8530,TRUE,,8530,,,,,,rotten tomatoes,cls/sentiment analysis,,,,Pang and Lee 2005
+yelp_review_full,,sentiment,cls,no dev set,,TRUE,TRUE,,650000,500000,TRUE,,500000,,,,,,yelp review full,other/regression,,,,Zhang et al. 2015; (link)
+lambada,,story_completion,gen,revisit: story or cloze or coref? trivial cloze prompt; training set is just unlabeled corpora; GPT task,GPT,,,TRUE,0,0,,TRUE,0,accuracy;perplexity;median_rank,,https://arxiv.org/pdf/1606.06031.pdf,,,,,,,,
+craffel/openai_lambada,,story_completion,gen,revisit: story or cloze or coref? trivial cloze prompt; training set is just unlabeled corpora; GPT task,GPT,,,TRUE,0,0,,TRUE,0,accuracy;perplexity;median_rank,,https://arxiv.org/pdf/1606.06031.pdf,,,,,,,,
+story_cloze,2016,story_completion,cls,todo: custom loading; swag like?,GPT,,,TRUE,,0,,TRUE,0,accuracy,,https://arxiv.org/pdf/1604.01696.pdf,,,,,,,,
+hellaswag,,story_completion,cls,,GPT,,,TRUE,39905,0,TRUE,,39905,accuracy,,https://arxiv.org/pdf/1905.07830.pdf,,,hellaswag,qa/multiple-choice qa,,,,Zellers et al. 2019
+common_gen,,structure_to_text,gen,,,TRUE,TRUE,,67389,67389,TRUE,,67389,,,,,,common gen,other,,,,Lin et al. 2020b
+wiki_bio,,structure_to_text,gen,,,TRUE,TRUE,,582659,500000,TRUE,,500000,,,,,,wiki bio,cg/other,,,,Lebret et al. 2016
+cnn_dailymail,3.0.0,summarization,gen,,,TRUE,TRUE,,287113,287113,TRUE,,287113,,,,,,,,,,,
+gigaword,,summarization,gen,,,TRUE,TRUE,,3803957,500000,TRUE,,500000,,,,,,gigaword,cg/summarization,,,,Napoles et al. 2012
+multi_news,,summarization,gen,,CrossFit,,TRUE,,44972,44972,,,,,,,,,multi news,cg/summarization,,,,Fabbri et al. 2019
+samsum,,summarization,gen,,CrossFit,,TRUE,,14732,14732,,,,,,,,,samsum,cg/summarization,,,,Gliwa et al. 2019
+xsum,,summarization,gen,,,TRUE,TRUE,TRUE,204045,204045,TRUE,TRUE,204045,rouge,,https://arxiv.org/pdf/1808.08745.pdf,,,xsum,cg/summarization,,,,Narayan et al. 2018
+ag_news,,topic_classification,cls,,,TRUE,TRUE,,120000,120000,TRUE,,120000,,,http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html,,,ag news,cls/topic,,,,Gulli (link)
+dbpedia_14,,topic_classification,cls,,,TRUE,TRUE,,560000,500000,TRUE,,500000,,,https://svn.aksw.org/papers/2013/SWJ_DBpedia/public.pdf,,,dbpedia 14,cls/topic,,,,Lehmann et al. 2015
+trec,,topic_classification,cls,,,TRUE,TRUE,,5452,5452,TRUE,,5452,,,https://trec.nist.gov/data/qa.html,,,trec,cls/other,,,,Li and Roth 2002; Hovy et al. 2001
+super_glue,wic,word_sense_disambiguation,cls,,,TRUE,,TRUE,5428,0,TRUE,TRUE,5428,accuracy,,https://arxiv.org/pdf/1808.09121.pdf,,,superglue-wic,cls/other,,lexical_knowledge,,Pilehvar and Camacho-Collados 2019
+Staging Area,,,,,,,,,,,,,,,,,,,,,,,,
+Would Include but not in HF or some other practical limitations,,,,,,,,,,,,,,,,,,,,,,,,
+definite_pronoun_resolution,,coreference,,todo: download error,,,,,,,,,,,,,,,definite pronoun resolution,other,,,,Rahman and Ng 2012
+jeopardy,,closed-book qa,gen,sporadic download error,CrossFit,,,,,,,,,,,,,promptsource download error,jeopardy,qa/closed-book qa,,,,(link)
+blimp,,,cls,no prompts yet; collapse subsets,,,,,,0,,,0,,,,,,,,,,,
+Hendrycks et al. 2021,,,,https://arxiv.org/abs/2009.03300v3,,,,,,,,,,,,,,,,,,,,
+Multi-Turn Dialogue Reasoning,,,,https://aclanthology.org/2020.acl-main.130.pdf,Vania,,,,7088,,,,,,,,,,,,,,,
+Argument Reasoning Comprehension Task,,,,https://aclanthology.org/N18-1175.pdf,Vania,,,,1211,,,,,,,,,,,,,,,
+MCScript,,,,https://aclanthology.org/L18-1564.pdf,Vania,,,,14191,,,,,,,,,,,,,,,
+narrativeqa,,,,very long input sequence,,,,,,,,,,,,,,skip for experiment D3: very long input sequence,NarQA,Abstractive QA,,,,
+newsqa,,,,download error,TaskEmbed,,,,,,,,,,,,,promptsource download error,NewsQA,Extractive QA,,,,Trischler et al. 2017
+eli5,,,,dataset split error,CrossFit,,,,,,,,,,,https://facebookresearch.github.io/ELI5/explore.html,,skip: HF datasets error the split field is used for subsets,eli5-askh,qa/long-form qa,,possibly knowledge-neutral,,Fan et al. 2019
+Maybe Reconsider,,,,,,,,,,,,,,,,,,,,,,,,
+zest,,,,its original task is quite complex (need to provide a decision function); should be held-out eval only,self,,,,,,,,,,,,,,,,,,,
+swag,,story_completion,cls,revisit whether this should be considered as a variant of NLI,,,,,73546,0,TRUE,,73546,,,,,,swag,qa/multiple-choice qa,,,,Zellers et al. 2018
+codah,codah,story_completion,cls,a variant of swag revisit whether this should be considered as a variant of NLI,,,,,2776,0,TRUE,,2776,,,,,,codah,qa/multiple-choice qa,,,,Chen et al. 2019
+wiki_auto,,,,revisit: lots of duplicate simplified text; novel generative task could be very challenging,CrossFit,,,,,,,,,,,,,no prompt yet,wiki auto,cls/other,,text simplification,,Jiang et al. 2020
+proto_qa,,,gen,"generate prototypical concepts, kinda niche format with multiple correct answers",CrossFit,,,,,,,,,,,,,no prompt yet,proto qa,other,,,,Boratko et al. 2020
+empathetic_dialogues,,,,generation? classification?,CrossFit,,,,,,,,,,,https://arxiv.org/pdf/1811.00207.pdf,,no prompt yet,empathetic dialogues,cg/dialogue,,,,Rashkin et al. 2019
+qed,,,,uses held-out Natural Questions,,,,,,,,,,,,,,,,,,,,
+kilt_tasks,aidayago2,,,,,,,,,,,,,,,,,no prompt yet,kilt ay2,other/entity linking,,encyclopedia,,Hoffart et al. 2011
+kilt_tasks,wow,,,,,,,,,,,,,,,,,no prompt yet,kilt wow,cg/dialogue,,encyclopedia,,Dinan et al. 2019
+lama,conceptnet,,,,,,,,,,,,,,,,,no prompt yet,lama-conceptnet,qa/closed-book qa,,encyclopedia,,Petroni et al. 2019 2020
+lama,google_re,,,,,,,,,,,,,,,,,no prompt yet,lama-google re,qa/closed-book qa,,encyclopedia,,Petroni et al. 2019 2020
+lama,squad,,,,,,,,,,,,,,,,,no prompt yet,lama-squad,qa/closed-book qa,,encyclopedia,,Petroni et al. 2019 2020
+lama,trex,,,,,,,,,,,,,,,,,no prompt yet,lama-trex,qa/closed-book qa,,encyclopedia,,Petroni et al. 2019 2020
+limit,,physical cognition,,,,,,,,,,,,,,https://aclanthology.org/2020.findings-emnlp.88.pdf,,label errors in dataset itself? also no validation set otherwise well motivated by semantic theories,limit,other,,physical semantic repr.,,Manotas et al. 2020
+kilt_tasks,fever,,,revisit whether this should be considered as a variant of NLI,,,,,,,,,,,,,,temporary skip: prompts available in non-benchmark standalone dataset,kilt fever,cls/fact checking,,encyclopedia,,Thorne et al. 2018
+Skipped,,,,,,,,,,,,,,,,,,,,,,,,
+fever,v2.0,closed-book qa/fact checking,,also in KILT,,,,,,,,,,,,,,skip: awkward prompts as closed-book qa,FEVER,,,,,
+hotpot_qa,distractor,,,also in KILT,,,,,,,,,,,,,,skip for experiment D3: very long input sequence,Hotpot QA,,,,,
+hotpot_qa,fullwiki,,,also in KILT,,,,,,,,,,,,,,skip for experiment D3: very long input sequence,Hotpot QA,,,,,
+emo,,sentiment,cls,skip: offensive and ungrammatical text,,merged,,,30160,0,TRUE,TRUE,30160,precision;recall;F1,,https://aclanthology.org/S19-2005.pdf,,skip: offensive and ungrammatical text,emo,cls/emotion,,,,Chatterjee et al. 2019
+freebase_qa,,QA_closed_book,gen,"need to be held out because web_questions is ""supposed to be answerable by Freebase""",,,,,20358,0,TRUE,,20358,,,,intensive,,freebase qa,qa/closed-book qa,,,,Jiang et al. 2019
+aqua_rat,,,,,,,,,,,,,,,,https://arxiv.org/abs/1705.04146,,skip: nontrivial math,aqua rat,qa/multiple-choice qa,,nontrivial math,,Ling et al. 2017
+math_qa,,,,,,,,,,,,,,,,,,skip: nontrivial math,math qa,qa/multiple-choice qa,,nontrivial math,,Amini et al. 2019
+numer_sense,,,,,,,,,,,,,,,,,,skip: closed-book trivia ,numer sense,qa/closed-book qa,,numerical knowledge,,Lin et al. 2020a
+squad_adversarial,,,,,,,,,,,,,,,,,,validation set only,,,,,,
+squadshifts,,,,,,,,,,,,,,,,,,test set only,,,,,,
+sms_spam,,,,,,,,,,,,,,,,,,skip: unclean corpus and likely harmful content,sms spam,cls/other,,,,Almeida et al. 2011
+search_qa,,,,,,,,,,,,,,,,,,skip: seems like a very unclean corpus,search qa,qa/closed-book qa,,,,Dunn et al. 2017
+kilt_tasks,trex,,,,,,,,,,,,,,,,,skip: non-natural language,kilt trex,qa/closed-book qa,,encyclopedia,,Elsahar et al. 2018
+kilt_tasks,structured_zeroshot,,,,,,,,,,,,,,,,,skip: non-natural language,kilt zsre,qa/closed-book qa,,encyclopedia,,Levy et al. 2017
+spider,,,,,,,,,,,,,,,,,,skip: non-natural language,spider,cg/other,,,,Yu et al. 2018
+wikisql,,,,,,,,,,,,,,,,,,skip: non-natural language,wikisql,cg/other,,,,Zhong et al. 2017
+com_qa,,,,,CrossFit,,,,,,,,,,,https://arxiv.org/pdf/1809.09528.pdf,,skip: non-human language: URL,ComQA (Abujabal et al. 2019),factoid QA w/ paraphrases,,,snippets WikiAnswers,
+climate_fever,,,,revisit whether this should be considered as a variant of NLI,,,,,,,,,,,,,,skip: no train set,climate fever,cls/fact checking,,,,Diggelmann et al. 2020
+art,,,,,,,,,,,,,,,,https://arxiv.org/pdf/1908.05739.pdf,,skip: NLI reserved for generalization studies (although this one is not a traditionally defined NLI),art (abductive nli),other,,,,Bhagavatula et al. 2020
+glue,mnli,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,glue-mnli,cls/nli,,,,Williams et al. 2018
+glue,qnli,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,glue-qnli,cls/nli,,,,Rajpurkar et al. 2016
+glue,rte,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,glue-rte,cls/nli,,,,Dagan et al. 2005; Bar-Haim et al. 2006 Giampiccolo et al. 2007; Bentivogli et al. 2009
+glue,wnli,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,glue-wnli,cls/nli,,,,Levesque et al. 2012
+,,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,scitail,cls/nli,,,,Khot et al. 2018
+,,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,sick,cls/nli,,,,Marelli et al. 2014
+,,classification_NLI,,,,,,,,,,,,,,,,skip: NLI reserved for generalization studies,SNLI (Bowman et al. 2015),NLI,,,misc.,
+aeslc,,,,summarization by email subject line,,,,,,,,,,,,https://arxiv.org/abs/1906.03497,,skip: niche task,aeslc,cg/summarization,,generation,,Zhang and Tetreault 2019
+onestop_english,,,,,,,,,,,,,,,,https://aclanthology.org/W18-0535.pdf,,skip: niche task: classify curriculum difficulty,onestop english,cls/other,,,,Vajjala and Lučić 2018
+mocha,,,,,,,,,,,,,,,,,,skip: model generated text,mocha,other/regression,,,,Chen et al. 2020a
+commonsense_qa,,,,duplicate with cos_e,Vania,,,,9741,,,,,,,https://arxiv.org/pdf/1811.00937.pdf,,,Commonsense QA,qa/multiple-choice qa,,,,Talmor et al. 2019
+,,,,,,,,,,,,,,,,,,skip: maybe harmful content from Twitter,emotion,cls/emotion,,,,Saravia et al. 2018
+,,,,the authors themselves seem to have renounced their own work,,,,,,,,,,,,https://github.com/nyu-mll/crows-pairs,,skip: harmful content,crows pairs,other,,,,Nangia et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-directed vs generalized,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-disability,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-gender,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-national origin,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-race,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-religion,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,ethos-sexual orientation,cls/hate speech detection,,,,Mollas et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,hate speech offensive,cls/hate speech detection,,,,Davidson et al. 2017
+,,,,,,,,,,,,,,,,,,skip: harmful content,hate speech18,cls/hate speech detection,,,,de Gibert et al. 2018
+,,,,,,,,,,,,,,,,,,skip: harmful content,hatexplain,cls/hate speech detection,,,,Mathew et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,reddit tifu-title,cg/summarization,,,,Kim et al. 2019
+,,,,,,,,,,,,,,,,,,skip: harmful content,reddit tifu-tldr,cg/summarization,,,,Kim et al. 2019
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-emoji,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-emotion,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-hate,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-irony,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-offensive,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-sentiment,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-stance abortion,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-stance atheism,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-stance climate,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-stance feminist,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet eval-stance hillary,cls/emotion,,,,Barbieri et al. 2020
+,,,,,,,,,,,,,,,,,,skip: harmful content,tweet qa,qa/machine reading comprehension,,,,Xiong et al. 2019
+yelp_polarity,,,,,,,,,,,,,,,,,,skip: duplicate with yelp_review_full,yelp polarity,cls/sentiment analysis,,,,Zhang et al. 2015; (link)
+quora,,,,,,,,,,,,,,,,https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs,,skip: duplicate under GLUE,QQP,paraphrase identification,,,social QA,Iyer et al. 2017
+squad,,,,,,,,,,,,,,,,,,skip: duplicate under Squad 2.0,SQuAD 1.1,Extractive QA,,,,
+yahoo_answers_topics,,,,,,,,,,,,,,,,,,skip for early experiments: unclean corpus,yahoo answers topics,cls/topic,,,,(link)
+tab_fact,,,,,,,,,,,,,,,,,,skip for early experiments: tabular data,tab fact,cls/fact checking,,,,Chen et al. 2020b
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-anaphor gender agreement,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-anaphor number agreement,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-determiner noun agreement with adj irregular 1,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-ellipsis n bar 1,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-ellipsis n bar 2,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-existential there quantifiers 1,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-irregular past participle adjectives,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-sentential negation npi licensor present,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-sentential negation npi scope,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: revisit if we want to include a large number of ungrammatical sentences in our training data,blimp-wh questions object gap,other/linguistic phenomenon,,syntax,,Warstadt et al. 2020
+poem_sentiment,,,,,,,,,,,,,,,,,,skip for early experiments: poetry domain,poem sentiment,cls/sentiment analysis,,creativity,,Sheng and Uthus 2020
+acronym_identification,,,,,,,,,,,,,,,,https://arxiv.org/pdf/2010.14678.pdf,,skip for early experiments: niche/hard task,acronym identification,other,,,,Pouran Ben Veyseh et al. 2020
+google_wellformed_query,,,,revisit whether to exclude fine-grain regression tasks,,,,,,,,,,,,,,skip for early experiments: niche/hard task,google wellformed query,cls/other,,,,Faruqui and Das 2018
+liar,,,,revisit whether to exclude fine-grain regression tasks,,,,,,,,,,,,,,skip for early experiments: niche/hard task,liar,cls/fact checking,,,,Wang 2017
+,,,,,,,,,,,,,,,,,,skip for early experiments: niche/hard task,break-QDMR-high-level,other,,semantic representation,,Wolfson et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: niche/hard task,crawl domain,other,,,,Zhang et al. 2020
+discovery,discovery,,,,,,,,,,,,,,,,,skip for early experiments: niche task with no canonical answer,discovery,cls/other,,generative-ish,,Sileo et al. 2019
+wiki_split,,,,,,,,,,,,,,,,,,skip for early experiments: niche task,wiki split,cg/other,,,,Botha et al. 2018
+,,,,,,,,,,,,,,,,,,skip for early experiments: multilingual,aslg pc12,other,,,,Othman and Jemni 2012
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,CCG (Hockenmaier and Steedman 2007),CCG supertagging,,syntax,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,Chunk (Tjong Kim Sang and Buchholz 2000),syntactic chunking,,syntax,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,Conj (Ficler and Goldberg 2016),conjunct identification,,syntax,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,GED (Yannakoudakis et al. 2011),grammatical error detection,,syntax,misc.,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,GGParent (Liu et al. 2019a),syntactic tagging,,syntax,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,GParent (Liu et al. 2019a),syntactic tagging,,syntax,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,NER (Tjong Kim Sang and De Meulder 2003),named entity recognition,,,news,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,Parent (Liu et al. 2019a),syntactic tagging,,syntax; constituency,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,POS-EWT (Silveira et al. 2014),part-of-speech tagging,,syntax,Web Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,POS-PTB (Marcus et al. 1993),part-of-speech tagging,,syntax,Penn Treebank,
+,,,,,,,,,,,,,,,,,,skip for early experiments: input token/span classification less straightforward for a generative LM,ST (Bjerva et al. 2016),semantic tagging,,,Groningen Meaning Bank,
+financial_phrasebank,,,,,,,,,,,,,,,,,,skip for early experiments: financial domain,financial phrasebank,cls/sentiment analysis,,,,Malo et al. 2014
+health_fact,,,,,,,,,,,,,,,,,,skip for early experiments: biomedical domain,health fact,cls/fact checking,,,,Kotonya and Toni 2020
+,,,,,,,,,,,,,,,,http://www.sciencedirect.com/science/article/pii/S1532046412000615,,skip for early experiments: biomedical domain,ade corpus v2-classification,cls/other,,,,Gurulingappa et al. 2012
+,,,,,,,,,,,,,,,,,,skip for early experiments: biomedical domain,ade corpus v2-dosage,other/slot filling,,,,Gurulingappa et al. 2012
+,,,,,,,,,,,,,,,,,,skip for early experiments: biomedical domain,ade corpus v2-effect,other/slot filling,,,,Gurulingappa et al. 2012
+,,,,,,,,,,,,,,,,,,skip for early experiments: biomedical domain,biomrc,qa/machine reading comprehension,,,,Pappas et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: biomedical domain,medical questions pairs,cls/paraphrase,,,,McCreery et al. 2020
+scicite,,,,,,,,,,,,,,,,,,skip for early experiments: academic domain + niche/hard task,scicite,cls/other,,,,Cohan et al. 2019
+,,,,,,,,,,,,,,,,,,skip for early experiments: abstract semantic representations,break-QDMR,other,,logical form,,Wolfson et al. 2020
+,,,,,,,,,,,,,,,,,,skip for early experiments: abstract semantic representations,e2e nlg cleaned,other,,,,Dušek et al. 2020 2019
+glue,sst2,,,,,,,,,,,,,,,,,revisit: very short and often ill-formed movie reviews,glue-sst2,cls/sentiment analysis,,,,Socher et al. 2013
+glue,stsb,fine-grain regression,,,,,,,,,,,,,,,,revisit whether to exclude fine-grain regression tasks,glue-stsb,semantic similarity,,,misc.,
+,,,,,,,,,,,,,,,,,,double check: subset missing from HF datasets,squad-no context,qa/closed-book qa,,,,Rajpurkar et al. 2016
+,,,,,,,,,,,,,,,,,,double check: subset missing from HF datasets,squad-with context,qa/machine reading comprehension,,,,Rajpurkar et al. 2016
+,,,,contrast sets,,,,,,,,,,,,https://arxiv.org/pdf/2004.02709.pdf,,double check: missing from HF datasets,BoolQ-CS,Binary yes/no,,,,
+,,,,,,,,,,,,,,,,https://aclanthology.org/C16-1236.pdf,,double check: missing from HF datasets,CQ (Bao et al. 2016),knowledge-based QA,,,snippets web queries/KB,
+,,,,contrast sets,,,,,,,,,,,,https://arxiv.org/pdf/2004.02709.pdf,,double check: missing from HF datasets,DROP-CS,Abstractive QA,,,,
+,,,,,,,,,,,,,,,,https://aclanthology.org/D13-1020.pdf,,double check: missing from HF datasets,MCTest,Multiple choice,,,,
+,,,,,,,,,,,,,,,,,,double check: missing from HF datasets,MRPC (Dolan and Brockett 2005),paraphrase identification,,,news,
+,,,,"""naturally perturbed"" version of BoolQ",,,,,,,,,,,,https://arxiv.org/pdf/2004.04849.pdf,,double check: missing from HF datasets,NP-BoolQ,Binary yes/no,,,,
+,,,,,,,,,,,,,,,,https://aclanthology.org/D19-1608.pdf,,double check: missing from HF datasets,quartz-no knowledge,qa/multiple-choice qa,,,,Tafjord et al. 2019b
+,,,,contrast sets,,,,,,,,,,,,https://arxiv.org/pdf/2004.02709.pdf,,double check: missing from HF datasets,Quoref-CS,Extractive QA,,,,
+,,,,contrast sets,,,,,,,,,,,,https://arxiv.org/pdf/2004.02709.pdf,,double check: missing from HF datasets,ROPES-CS,Extractive QA,,,,
diff --git a/promptsource/seqio_tasks/preview_annotated_prompts.py b/promptsource/seqio_tasks/preview_annotated_prompts.py
new file mode 100644
index 0000000000000000000000000000000000000000..6890d5247a081c70a9056eddca0d0e82895b2f4f
--- /dev/null
+++ b/promptsource/seqio_tasks/preview_annotated_prompts.py
@@ -0,0 +1,111 @@
+import csv
+from pprint import pprint
+from typing import Dict, List
+
+import pkg_resources
+from t5.data.glue_utils import get_glue_metric, get_super_glue_metric
+from t5.evaluation.metrics import accuracy, mean_multiclass_f1, rouge
+
+
+SAFE_EXCLUDE_CRITERIA = [
+    "template_bug",
+    "negated_answers",
+    "counting",
+    "answer_span_indices",
+    "non_natural_language",
+    "generative_non_true_implausible",
+]
+
+AGGRESSIVE_EXCLUDE_CRITERIA = [
+    "generative_non_true_task",
+    "nontrivial_choices_hidden",
+    "awkward_phrasing",
+    "ungrammatical",
+] + SAFE_EXCLUDE_CRITERIA
+
+
+NON_GLUE_METRICS = {  # for those with do_eval = True
+    "anli": [accuracy],
+    "hans": [accuracy],
+    "circa_goldstandard1_judgement": [mean_multiclass_f1(num_classes=8), accuracy],
+    "circa_goldstandard2_judgement": [mean_multiclass_f1(num_classes=5), accuracy],
+    "mc_taco": [accuracy],
+    "nq_open": [accuracy],
+    "qa_srl": [accuracy],
+    "openbookqa": [accuracy],
+    "race": [accuracy],
+    "social_i_qa": [accuracy],
+    "emo": [mean_multiclass_f1(num_classes=4)],
+    "xsum": [rouge],
+}
+
+
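+# Keep-predicate for `filter`: returns True for prompts that trip none of the
+# exclusion criteria, False as soon as any flagged column is set.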
+def exclude_bad_prompts(prompt: Dict) -> bool:
+    for criterion in SAFE_EXCLUDE_CRITERIA:  # or AGGRESSIVE_EXCLUDE_CRITERIA
+        if prompt.get(criterion):
+            return False
+    return True
+
+
+def load_annotated_prompts() -> List[Dict]:
+    annotated_csv_path = pkg_resources.resource_filename(__name__, "experiment_D3.csv")
+    with open(annotated_csv_path) as in_file:
+        reader = csv.DictReader(in_file)
+        all_tasks = [row for row in reader]
+
+    clean_tasks = list(filter(exclude_bad_prompts, all_tasks))
+
+    # Assign metrics
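+    # GLUE/SuperGLUE tasks reuse the metric functions bundled with t5, keyed by the
+    # subset name parsed out of "dataset_subset_template"; everything else falls back
+    # to the NON_GLUE_METRICS table via a prefix match on the full task name.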
+    non_glue_eval_sets = list(NON_GLUE_METRICS.keys())
+    for task in clean_tasks:
+        if not task["do_eval"]:
+            continue
+
+        full_name = task["dataset_subset_template"]
+        if full_name.startswith("glue"):
+            subset = full_name.split("_")[1]
+            task["metrics"] = get_glue_metric(subset)
+        elif full_name.startswith("super_glue"):
+            subset = full_name.split("_")[2]
+            if subset in ("wsc.fixed", "multirc"):
+                # TODO: WSC and MultiRC need special pre/postprocessing
+                task["metrics"] = [accuracy]
+                continue
+            task["metrics"] = get_super_glue_metric(subset)
+
+        for dataset_name in non_glue_eval_sets:
+            if full_name.startswith(dataset_name):
+                task["metrics"] = NON_GLUE_METRICS[dataset_name]
+
+        # Skip rank_classification for now until we actually support it
+        # if task["nontrivial_choices_hidden"]:
+        #     # Trick of plugging in answer options and rank LM probabilites as predictions.
+        #     # Required for all prompts with non_trivial_choices_hidden,
+        #     # but could be used for other tasks as well where answer choices are given.
+        #     if "metrics" not in task:
+        #         task["metrics"] = [rank_classification]
+        #     elif rank_classification not in task["metrics"]:
+        #         task["metrics"].append(rank_classification)
+
+        # should be already handled by NON_GLUE_METRICS
+        # if task['generative_true_task'] or task['generative_non_true_task']:
+        #     task['metrics'] = rouge
+
+    return clean_tasks
+
+
+def preview() -> None:
+    clean_tasks = load_annotated_prompts()
+
+    train_tasks = [t for t in clean_tasks if not t["skip_train"]]
+    eval_tasks = [t for t in clean_tasks if t["do_eval"]]
+
+    pprint([t["dataset_subset_template"] for t in train_tasks])
+    print(len(train_tasks))
+
+    pprint([f'{t["dataset_subset_template"]} {t["metrics"]}' for t in eval_tasks])
+    print(len(eval_tasks))
+
+
+if __name__ == "__main__":
+    preview()
diff --git a/promptsource/seqio_tasks/preview_promptsource.py b/promptsource/seqio_tasks/preview_promptsource.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dbbec7615aded5c895f41f2f66d6cd90589db3b
--- /dev/null
+++ b/promptsource/seqio_tasks/preview_promptsource.py
@@ -0,0 +1,105 @@
+import csv
+from typing import List, Optional, Tuple
+
+import pkg_resources
+
+# from rich import inspect
+from rich.pretty import pprint
+
+from promptsource.templates import TemplateCollection
+
+
+def preview() -> None:
+    experiment_path = pkg_resources.resource_filename(__name__, "experiment_D4.csv")
+    gsheet = {}
+    d4_train: List[Tuple[str, Optional[str]]] = []
+    d4_eval: List[Tuple[str, Optional[str]]] = []
+    d3_train_gpt: List[Tuple[str, Optional[str]]] = []
+    d3_train_sglue: List[Tuple[str, Optional[str]]] = []
+    with open(experiment_path) as exp_file:
+        reader = csv.DictReader(exp_file)
+        for row in reader:
+            if row["skip"]:
+                continue
+            if row["subset"] == "":
+                row["subset"] = None  # to match promptsource.Template object
+            dataset_subset = (row["HF_name"], row["subset"])
+            if row["do_train"] == "TRUE":
+                d4_train.append(dataset_subset)
+            if row["do_eval"] == "TRUE":
+                d4_eval.append(dataset_subset)
+            if row["D3_do_train"] == "TRUE" and "GPT" in row["seed_paper"]:
+                d3_train_gpt.append(dataset_subset)
+            if row["D3_do_train"] == "TRUE" and row["HF_name"] == "super_glue":
+                d3_train_sglue.append(dataset_subset)
+            gsheet[dataset_subset] = row
+    all_datasets = d4_train + d4_eval + d3_train_gpt + d3_train_sglue
+    print(f"Number of non-desk-rejected datasets = {len(all_datasets)}")
+    print(f"Number of training sets = {len(d4_train)}")
+    print(f"Number of evaluation sets = {len(d4_eval)}")
+
+    template_collection = TemplateCollection()
+    output = []
+    missing_og_flags = []
+    missing_metrics = []
+    for dataset_name, subset_name in template_collection.keys:
+        ds_name = (dataset_name, subset_name)
+        if ds_name not in d4_eval:
+            template_collection.remove(dataset_name, subset_name)
+            continue
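+        # Count templates that are faithful to the dataset's original task (OG) versus
+        # repurposed ones (non_OG), and collect templates with missing metadata.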
+        OG = 0
+        non_OG = 0
+        dataset = template_collection.get_dataset(dataset_name, subset_name)
+        for template_name in dataset.all_template_names:
+            template = dataset[template_name]
+            # if dataset_name == 'ropes':
+            #     inspect(template.metadata)
+            if not template.metadata.metrics:
+                missing_metrics.append(f"{dataset_name}/{subset_name}/{template_name}")
+
+            if template.metadata.original_task is True:
+                OG += 1
+            elif template.metadata.original_task is False:
+                non_OG += 1
+            elif template.metadata.original_task is None:
+                missing_og_flags.append(dataset_name + "/" + template_name)
+                continue
+
+        train_size = gsheet[ds_name]["train_size"]
+        if train_size == "":
+            train_size = 0
+        else:
+            train_size = int(train_size)
+
+        adjusted_train_size = train_size // len(dataset.all_template_names)
+
+        output.append(
+            (
+                f"{dataset_name} {subset_name if subset_name else ''}",
+                f"{OG}-{non_OG}",
+                f"{train_size:,}    {adjusted_train_size:,}",
+            )
+        )
+
+    pprint(output)
+    print(len(template_collection))
+
+    print("Missing metrics:")
+    pprint(missing_metrics)
+
+    print("Missing original task flags:")
+    pprint(missing_og_flags)
+
+    # # print(d4_train_mixture)
+    # print(f"Number of training templates = {len(d4_train_mixture)}")
+    # # print(d4_eval_mixture)
+    # print(f"Number of evaluation templates = {len(d4_eval_mixture)}")
+    # # for i in seqio.TaskRegistry.names():
+    # #     print(i)
+    # print(f"Number of SeqIO registered templates = {len(seqio.TaskRegistry.names())}")
+    # print("^ includes non-original task templates which are excluded from the eval mixture")
+
+
+if __name__ == "__main__":
+    preview()
diff --git a/promptsource/seqio_tasks/tasks.py b/promptsource/seqio_tasks/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b39719e11b615a5292138382676c2f48ed935cd
--- /dev/null
+++ b/promptsource/seqio_tasks/tasks.py
@@ -0,0 +1,421 @@
+import csv
+import functools
+from typing import Dict, List, Optional, Tuple
+
+import datasets
+import pkg_resources
+import seqio
+import t5
+import tensorflow as tf
+from t5.data.glue_utils import get_glue_metric, get_super_glue_metric
+from t5.evaluation import metrics as mt
+
+import promptsource.templates
+from promptsource.seqio_tasks import utils
+
+
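+# Map the metric names recorded in promptsource template metadata to t5 metric functions.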
+GET_METRICS = {
+    "BLEU": mt.bleu,
+    "ROUGE": mt.rouge,
+    "Span Squad": mt.span_squad,
+    "Squad": mt.squad,
+    "Trivia QA": mt.trivia_qa,
+    "Accuracy": mt.accuracy,
+    "Sequence Accuracy": mt.sequence_accuracy,
+    "Pearson Correlation": mt.pearson_corrcoef,
+    "Spearman Correlation": mt.spearman_corrcoef,
+    "MultiRC": mt.multirc_f1_over_all_answers,
+    "AUC": mt.auc,
+    "COQA F1": mt.coqa_f1,
+    "Edit Distance": mt.edit_distance,
+    # "Mean Reciprocal Rank": mt.accuracy,  # NOTE not in T5?
+    "Other": mt.accuracy,
+    # Missing support for mean_multiclass_f1 etc. which need a num_classes parameter
+}
+
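+# Any dataset with more examples than this is treated as having exactly this many
+# when computing per-template caps and mixing rates.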
+MAX_EXAMPLES_PER_DATASET = 500_000
+
+
+def strip_whitespace(output_or_target, example=None, is_target=False):
+    """Cached tasks from promptsource all have a leading space on the ground-truth targets."""
+    return output_or_target.strip()
+
+
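+# Templates with a fixed list of answer choices get a postprocessor that maps the
+# generated string back to a class id (so classification metrics apply); free-form
+# templates only have surrounding whitespace stripped.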
+def maybe_get_class_id_postprocessor(template):
+    if template.get_fixed_answer_choices_list():
+
+        def postprocess_fn(output_or_target, example=None, is_target=False):
+            output_or_target = strip_whitespace(output_or_target)
+            return t5.data.postprocessors.string_label_to_class_id(
+                output_or_target, label_classes=template.get_fixed_answer_choices_list()
+            )
+
+        return postprocess_fn
+
+    else:
+        return strip_whitespace
+
+
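+# Load the HF dataset split, render every example through the prompt template, and
+# wrap the result as a tf.data.Dataset for seqio.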
+def get_tf_dataset(split, shuffle_files, seed, dataset_name, subset_name, template, split_mapping):
+    # HF datasets does not support file-level shuffling
+    del shuffle_files, seed
+    dataset = datasets.load_dataset(dataset_name, subset_name)
+    dataset = dataset[split_mapping[split]]
+    dataset = utils.apply_template(dataset, template)
+    return utils.hf_dataset_to_tf_dataset(dataset)
+
+
+def add_task(dataset_name, subset_name, template_name, task_name=None, split_mapping=None):
+    template = all_templates.get_dataset(dataset_name, subset_name)[template_name]
+    task_name = task_name or utils.get_task_name(dataset_name, subset_name, template_name)
+
+    if dataset_name == "glue":
+        metrics = get_glue_metric(subset_name)
+    elif dataset_name == "super_glue":
+        if subset_name in ("wsc.fixed", "multirc"):
+            # TODO: WSC and MultiRC need special pre/postprocessing
+            metrics = [mt.accuracy]
+        else:
+            metrics = get_super_glue_metric(subset_name)
+    else:
+        # TODO what if metric is null?
+        metrics = [GET_METRICS[m] for m in template.metadata.metrics]
+
+    dataset_splits = utils.get_dataset_splits(dataset_name, subset_name)
+    split_mapping = split_mapping or {k: k for k in dataset_splits.keys()}
+
+    dataset_fn = functools.partial(
+        get_tf_dataset,
+        seed=None,
+        dataset_name=dataset_name,
+        subset_name=subset_name,
+        template=template,
+        split_mapping=split_mapping,
+    )
+    data_source = seqio.FunctionDataSource(
+        dataset_fn,
+        splits=list(split_mapping.keys()),
+        num_input_examples={s: dataset_splits[split_mapping[s]].num_examples for s in split_mapping.keys()},
+    )
+    output_features = {
+        "inputs": seqio.Feature(t5.data.get_default_vocabulary(), add_eos=False, dtype=tf.int32),
+        "targets": seqio.Feature(t5.data.get_default_vocabulary(), add_eos=True, dtype=tf.int32),
+    }
+    preprocessors = [
+        seqio.preprocessors.tokenize,
+        seqio.preprocessors.append_eos,
+        seqio.CacheDatasetPlaceholder(required=False),
+    ]
+
+    # Add train and normal eval tasks
+    seqio.TaskRegistry.add(
+        task_name,
+        data_source,
+        preprocessors=preprocessors,
+        output_features=output_features,
+        metric_fns=metrics,
+        postprocess_fn=maybe_get_class_id_postprocessor(template),
+    )
+
+    # Add rank classification eval task
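+    # For templates with answer choices, also register a "<task>_score_eval" variant that
+    # replicates each example once per choice, scores every candidate with the LM, and
+    # checks whether the highest-scoring choice is the correct one.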
+    if template.answer_choices:
+        rank_classification_preprocessor = functools.partial(
+            t5.data.preprocessors.rank_classification,
+            inputs_fn=lambda ex: tf.fill((len(ex["answer_choices"]),), ex["inputs"]),
+            targets_fn=lambda ex: ex["answer_choices"],
+            is_correct_fn=lambda ex: tf.equal(ex["answer_choices"], tf.strings.strip(ex["targets"])),
+            weight_fn=lambda ex: 1.0,
+        )
+
+        fixed_choices = template.get_fixed_answer_choices_list()
+        num_classes = len(fixed_choices) if fixed_choices else None
+        seqio.TaskRegistry.add(
+            task_name + "_score_eval",
+            data_source,
+            preprocessors=[rank_classification_preprocessor] + preprocessors,
+            output_features=output_features,
+            metric_fns=[functools.partial(t5.evaluation.metrics.rank_classification, num_classes=num_classes)],
+            postprocess_fn=t5.data.postprocessors.rank_classification,
+        )
+
+
+dataset_subset_tuple = Tuple[str, Optional[str]]
+d4_train: List[dataset_subset_tuple] = []
+d4_eval: List[dataset_subset_tuple] = []
+d3_train_gpt: List[dataset_subset_tuple] = []
+d3_train_sglue: List[dataset_subset_tuple] = []
+bias_fairness_eval: List[dataset_subset_tuple] = []
+gsheet: Dict[dataset_subset_tuple, Dict] = {}
+experiment_path = pkg_resources.resource_filename(__name__, "experiment_D4.csv")
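+# The annotation spreadsheet (exported as CSV) drives task selection: each row's TRUE
+# flags decide which dataset/subset pairs join the training, evaluation, and
+# bias/fairness pools.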
+with open(experiment_path) as exp_file:
+    reader = csv.DictReader(exp_file)
+    for row in reader:
+        if row["skip"]:
+            continue
+        if row["subset"] == "":
+            row["subset"] = None  # to match promptsource.Template object
+        dataset_subset = (row["HF_name"], row["subset"])
+        if row["do_train"] == "TRUE":
+            d4_train.append(dataset_subset)
+        if row["do_eval"] == "TRUE":
+            d4_eval.append(dataset_subset)
+        if row["D3_do_train"] == "TRUE" and "GPT" in row["seed_paper"]:
+            d3_train_gpt.append(dataset_subset)
+        if row["D3_do_train"] == "TRUE" and row["HF_name"] == "super_glue":
+            d3_train_sglue.append(dataset_subset)
+        if (
+            row["do_eval"] == "TRUE"
+            and row["task_by_convention"] == "bias_and_fairness"
+            and row["HF_name"] != "winogender"
+        ):
+            bias_fairness_eval.append(dataset_subset)
+        gsheet[dataset_subset] = row
+all_datasets = d4_train + d4_eval + d3_train_gpt + d3_train_sglue + bias_fairness_eval
+
+all_templates = promptsource.templates.TemplateCollection()
+all_templates.remove("anli")  # Need to special-case ANLI due to weird split conventions
+
+# 3 stages of training/ablation: D4 -> GPT -> SuperGLUE
+d4_train_mixture: List[str] = []  # strings are dataset_subset_template
+gpt_train_mixture: List[str] = []
+sglue_train_mixture: List[str] = []
+d4_eval_mixture: List[str] = []
+bias_fairness_eval_mixture: List[str] = []
+mixture_cap: Dict[str, int] = {}
+single_original_task: Dict[Tuple[str, str], str] = {}
+all_original_tasks: List[str] = []
+for dataset_name, subset_name in all_templates.keys:
+    if (dataset_name, subset_name) not in all_datasets:
+        all_templates.remove(dataset_name, subset_name)
+        continue
+
+    dataset = all_templates.get_dataset(dataset_name, subset_name)
+    num_templates = len(dataset.all_template_names)
+    train_size = gsheet[(dataset_name, subset_name)]["train_size"]
+    if train_size == "":
+        train_size = 0
+    else:
+        train_size = int(train_size)
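+    # Cap oversized datasets: their budget of MAX_EXAMPLES_PER_DATASET examples is split
+    # evenly across templates, while smaller datasets keep their full size per template.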
+    if train_size > MAX_EXAMPLES_PER_DATASET:
+        cap = MAX_EXAMPLES_PER_DATASET // num_templates
+    else:
+        cap = train_size
+    for template_name in dataset.all_template_names:
+        add_task(dataset_name, subset_name, template_name)
+
+        template = dataset[template_name]
+
+        task_name = utils.get_task_name(dataset_name, subset_name, template_name)
+
+        if (dataset_name, subset_name) not in single_original_task and template.metadata.original_task:
+            single_original_task[(dataset_name, subset_name)] = task_name
+
+        if template.metadata.original_task:
+            all_original_tasks.append(task_name)
+
+        if (dataset_name, subset_name) in d4_train:
+            d4_train_mixture.append(task_name)
+            mixture_cap[task_name] = cap
+        if (dataset_name, subset_name) in d3_train_gpt:
+            gpt_train_mixture.append(task_name)
+            mixture_cap[task_name] = cap
+        if (dataset_name, subset_name) in d3_train_sglue:
+            sglue_train_mixture.append(task_name)
+            mixture_cap[task_name] = cap
+        if (dataset_name, subset_name) in d4_eval:
+            if template.metadata.original_task:
+                d4_eval_mixture.append(task_name)
+            # TODO use template.metadata.answer_choices here for rank eval
+        if (dataset_name, subset_name) in bias_fairness_eval:
+            bias_fairness_eval_mixture.append(task_name)
+
+# Special case for ANLI, which has weirdly-named splits and rounds that should be subsets
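+# Each round (r1/r2/r3) becomes its own task by remapping the generic
+# train/validation/test split names onto the round-suffixed splits in the HF dataset.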
+dataset_name, subset_name = ("anli", None)
+dataset = all_templates.get_dataset(dataset_name, subset_name)
+for anli_round in ("r1", "r2", "r3"):
+    for template_name in all_templates.get_dataset(dataset_name, subset_name).all_template_names:
+        task_name = utils.get_task_name(dataset_name, subset_name, template_name) + f"_{anli_round}"
+        split_mapping = {
+            "train": f"train_{anli_round}",
+            "validation": f"dev_{anli_round}",
+            "test": f"test_{anli_round}",
+        }
+        add_task(dataset_name, subset_name, template_name, task_name, split_mapping)
+
+        template = dataset[template_name]
+        if template.metadata.original_task:
+            d4_eval_mixture.append(task_name)  # TODO or add to ANLI special mixture
+        # TODO use template.metadata.answer_choices here for rank eval
+
+
+TASK_BLACKLIST = [
+    # Tasks which often tokenize to > 1024 tokens currently
+    "hotpot_qa_distractor_Generate_Explanations",
+    "hotpot_qa_fullwiki_Generate_Explanations",
+    "hotpot_qa_distractor_Generate_Answer_and_Explanations",
+    "hotpot_qa_fullwiki_Generate_Answer_and_Explanations",
+    "hotpot_qa_fullwiki_Generate_Answer",
+    "hotpot_qa_distractor_Generate_Answer",
+    "hotpot_qa_distractor_Generate_Title_2",
+    "hotpot_qa_fullwiki_Generate_Title_2",
+    "hotpot_qa_fullwiki_Generate_Title_1",
+    "hotpot_qa_distractor_Generate_Title_1",
+    "hotpot_qa_distractor_Generate_Question",
+    "hotpot_qa_fullwiki_Generate_Question",
+    "tab_fact_tab_fact_tab_fact_3",
+    "tab_fact_tab_fact_tab_fact_2",
+    "tab_fact_tab_fact_tab_fact_1",
+    "tab_fact_tab_fact_tab_fact_7",
+    "tab_fact_tab_fact_tab_fact_4",
+    "tab_fact_tab_fact_tab_fact_5",
+    "tab_fact_tab_fact_tab_fact_6",
+    "wiki_hop_masked_Choose_Best_Object_Candidate",
+    "wiki_hop_masked_Indirect_Question_about_Birthplace_Citizenship_Place_of_Death",
+    "narrativeqa_Template_05",
+    "ecthr_cases_alleged_violation_prediction_silver_rationales",
+    # Tasks with broken cached files
+    "gigaword_summarize_",
+]
+
+# Tasks that failed caching (won't try to fix them for now) - remove when we are done
+D4_TRAIN_SCORE_EVAL_TASK_BLACKLIST = [
+    "amazon_polarity_Is_this_product_review_positive_score_eval",
+    "amazon_polarity_Is_this_review_negative_score_eval",
+    "amazon_polarity_Is_this_review_score_eval",
+    "amazon_polarity_User_recommend_this_product_score_eval",
+    "amazon_polarity_convey_negative_or_positive_sentiment_score_eval",
+    "amazon_polarity_flattering_or_not_score_eval",
+    "amazon_polarity_negative_or_positive_tone_score_eval",
+    "amazon_polarity_user_satisfied_score_eval",
+    "amazon_polarity_would_you_buy_score_eval",
+    "dbpedia_14_given_a_choice_of_categories__score_eval",
+    "dbpedia_14_given_list_what_category_does_the_paragraph_belong_to_score_eval",
+    "dbpedia_14_pick_one_category_for_the_following_text_score_eval",
+    "wiki_hop_original_choose_best_object_affirmative_1_score_eval",
+    "wiki_hop_original_choose_best_object_affirmative_2_score_eval",
+    "wiki_hop_original_choose_best_object_affirmative_3_score_eval",
+    "wiki_hop_original_choose_best_object_interrogative_1_score_eval",
+    "wiki_hop_original_choose_best_object_interrogative_2_score_eval",
+]
+
+seqio.MixtureRegistry.add(
+    "d4_train",
+    [task for task in d4_train_mixture if task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "gpt_train",
+    [task for task in gpt_train_mixture if task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "sglue_train",
+    [task for task in sglue_train_mixture if task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "d4_gpt_train",
+    [task for task in d4_train_mixture + gpt_train_mixture if task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "d4_gpt_sglue_train",
+    [task for task in d4_train_mixture + gpt_train_mixture + sglue_train_mixture if task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "d4_eval",
+    [task for task in d4_eval_mixture if task not in TASK_BLACKLIST],
+    default_rate=functools.partial(seqio.mixing_rate_num_examples, maximum=500_000),
+)  # eval mixture does not need to be capped
+
+
+seqio.MixtureRegistry.add(
+    "d4_score_eval",
+    [
+        task
+        for task in seqio.TaskRegistry.names()
+        if task.endswith("_score_eval")
+        and task.split("_score_eval")[0] in d4_eval_mixture
+        and task.split("_score_eval")[0] not in TASK_BLACKLIST
+    ],
+    default_rate=functools.partial(seqio.mixing_rate_num_examples, maximum=500_000),
+)
+
+# Train tasks we don't care about evaluating on
+D4_TRAIN_SKIP_EVAL = [
+    "paws_labeled_final",
+    "adversarial_qa_dbidaf",
+    "adversarial_qa_dbert",
+    "duorc_ParaphraseRC",
+    "dream",
+    "amazon_polarity",
+    "app_reviews",
+    "imdb",
+    "wiki_bio",
+    "gigaword",
+    "multi_news",
+    "samsum",
+    "dbpedia_14",
+    "trec",
+]
+
+seqio.MixtureRegistry.add(
+    "d4_train_eval",
+    [
+        task
+        for task in d4_train_mixture
+        if task not in TASK_BLACKLIST
+        and not any(skip in task for skip in D4_TRAIN_SKIP_EVAL)
+        and task in all_original_tasks
+    ],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "d4_train_score_eval",
+    [
+        task
+        for task in seqio.TaskRegistry.names()
+        if task.endswith("_score_eval")
+        and task.split("_score_eval")[0] in d4_train_mixture
+        and task.split("_score_eval")[0] not in TASK_BLACKLIST
+        and task not in D4_TRAIN_SCORE_EVAL_TASK_BLACKLIST
+        and not any(skip in task for skip in D4_TRAIN_SKIP_EVAL)
+        and task.split("_score_eval")[0] in all_original_tasks
+    ],
+    default_rate=functools.partial(seqio.mixing_rate_num_examples, maximum=500_000),
+)
+
+seqio.MixtureRegistry.add(
+    "d4_train_one_og_prompt",
+    [task for task in single_original_task.values() if task in d4_train_mixture and task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "d4_train_all_og_prompts",
+    [task for task in all_original_tasks if task in d4_train_mixture and task not in TASK_BLACKLIST],
+    default_rate=lambda t: mixture_cap[t.name],
+)
+
+seqio.MixtureRegistry.add(
+    "bias_fairness_eval",
+    bias_fairness_eval_mixture,
+    default_rate=functools.partial(seqio.mixing_rate_num_examples, maximum=500_000),
+)
+
+seqio.MixtureRegistry.add(
+    "bias_fairness_eval_score_eval",
+    [
+        task
+        for task in seqio.TaskRegistry.names()
+        if task.endswith("_score_eval") and task.split("_score_eval")[0] in bias_fairness_eval_mixture
+    ],
+    default_rate=functools.partial(seqio.mixing_rate_num_examples, maximum=500_000),
+)
diff --git a/promptsource/seqio_tasks/utils.py b/promptsource/seqio_tasks/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b4df95aa161ac06051c2397402342f3922342d2
--- /dev/null
+++ b/promptsource/seqio_tasks/utils.py
@@ -0,0 +1,77 @@
+import re
+
+import datasets
+import tensorflow as tf
+
+import promptsource.utils
+
+
+def feature_to_spec(feature, length=False):
+    if isinstance(feature, datasets.ClassLabel):
+        return tf.TensorSpec(shape=() if not length else (None if length == -1 else length,), dtype=tf.int64)
+    elif isinstance(feature, datasets.Value):
+        return tf.TensorSpec(
+            shape=() if not length else (None if length == -1 else length,), dtype=getattr(tf.dtypes, feature.dtype)
+        )
+    elif hasattr(feature, "dtype") and hasattr(feature, "shape"):
+        return tf.TensorSpec(shape=feature.shape, dtype=feature.dtype)
+    elif isinstance(feature, datasets.Sequence):
+        return feature_to_spec(feature.feature, length=feature.length)
+    elif isinstance(feature, list):
+        return [feature_to_spec(f, length=length) for f in feature]
+    elif isinstance(feature, dict):
+        return {k: feature_to_spec(v, length=length) for k, v in feature.items()}
+    else:
+        raise ValueError(f"Unparseable feature type {type(feature)}")
+
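+# For example (illustrative): feature_to_spec(datasets.ClassLabel(names=["neg", "pos"]))
+# gives tf.TensorSpec(shape=(), dtype=tf.int64), while
+# feature_to_spec(datasets.Sequence(datasets.Value("string"))) gives
+# tf.TensorSpec(shape=(None,), dtype=tf.string).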
+
+def hf_dataset_to_tf_dataset(dataset):
+    return tf.data.Dataset.from_generator(
+        dataset.__iter__, output_signature={k: feature_to_spec(v) for k, v in dataset.features.items()}
+    )
+
+
+def apply_template(dataset, template):
+    def map_fn(ex):
+        ex = promptsource.utils.removeHyphen(ex)
+        inputs_and_targets = template.apply(ex)
+        answer_choices = template.get_answer_choices_list(ex)
+        if len(inputs_and_targets) == 2:
+            inputs, targets = inputs_and_targets
+            if targets == "":
+                ex = {"inputs": inputs, "targets": "<NO LABEL>"}
+            else:
+                ex = {"inputs": inputs, "targets": targets}
+        # When template results in an empty example, template.apply returns [""]
+        # Also, if the template gets split wrong, len can be > 2
+        # We will filter these out later
+        else:
+            ex = {"inputs": "", "targets": ""}
+
+        if answer_choices:
+            ex["answer_choices"] = answer_choices
+
+        return ex
+
+    def filter_fn(ex):
+        return len(ex["inputs"]) > 0 and len(ex["targets"]) > 0
+
+    original_columns = dataset.column_names
+    dataset = dataset.map(map_fn).filter(filter_fn)
+    # map keeps original columns, remove them
+    return dataset.remove_columns(set(original_columns) - {"inputs", "targets", "answer_choices"})
+
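+# Usage sketch (hypothetical dataset/template names):
+#   ds = datasets.load_dataset("imdb", split="train")
+#   tpls = promptsource.templates.DatasetTemplates("imdb")
+#   prompted = apply_template(ds, tpls[tpls.all_template_names[0]])
+#   # `prompted` keeps only "inputs", "targets" (and "answer_choices" if set)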
+
+def get_dataset_splits(dataset_name, subset_name=None):
+    info = datasets.get_dataset_infos(dataset_name)
+    subset_name = subset_name or list(info.keys())[0]
+    return info[subset_name].splits
+
+
+def task_clean(text):
+    # Clean the text according to allowed characters for a task name
+    return re.sub(r"[^\w\d\._]+", "_", text)
+
+
+def get_task_name(dataset_name, subset_name, template_name):
+    return task_clean(dataset_name + (f"_{subset_name}_" if subset_name is not None else "_") + template_name)
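+
+
+# For example (illustrative): get_task_name("ade_corpus_v2",
+# "Ade_corpus_v2_classification", "label-to-text") returns
+# "ade_corpus_v2_Ade_corpus_v2_classification_label_to_text", since hyphens
+# fall outside the allowed character set and are replaced with underscores.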
diff --git a/promptsource/session.py b/promptsource/session.py
new file mode 100644
index 0000000000000000000000000000000000000000..75d22656fe75e47c6a09e9f1f99f66e0853a8ef8
--- /dev/null
+++ b/promptsource/session.py
@@ -0,0 +1,89 @@
+#
+# Code for managing session state, which is needed for multi-input forms
+# See https://github.com/streamlit/streamlit/issues/1557
+#
+# This code is taken from
+# https://gist.github.com/okld/0aba4869ba6fdc8d49132e6974e2e662
+#
+from streamlit.hashing import _CodeHasher
+from streamlit.report_thread import get_report_ctx
+from streamlit.server.server import Server
+
+
+class _SessionState:
+    def __init__(self, session, hash_funcs):
+        """Initialize SessionState instance."""
+        self.__dict__["_state"] = {
+            "data": {},
+            "hash": None,
+            "hasher": _CodeHasher(hash_funcs),
+            "is_rerun": False,
+            "session": session,
+        }
+
+    def __call__(self, **kwargs):
+        """Initialize state data once."""
+        for item, value in kwargs.items():
+            if item not in self._state["data"]:
+                self._state["data"][item] = value
+
+    def __getitem__(self, item):
+        """Return a saved state value, None if item is undefined."""
+        return self._state["data"].get(item, None)
+
+    def __getattr__(self, item):
+        """Return a saved state value, None if item is undefined."""
+        return self._state["data"].get(item, None)
+
+    def __setitem__(self, item, value):
+        """Set state value."""
+        self._state["data"][item] = value
+
+    def __setattr__(self, item, value):
+        """Set state value."""
+        self._state["data"][item] = value
+
+    def clear(self):
+        """Clear session state and request a rerun."""
+        self._state["data"].clear()
+        self._state["session"].request_rerun(None)
+
+    def sync(self):
+        """
+        Rerun the app from the beginning with all state values up to date,
+        so that widget values do not roll back to stale state.
+        """
+        data_to_bytes = self._state["hasher"].to_bytes(self._state["data"], None)
+
+        # Ensure to rerun only once to avoid infinite loops
+        # caused by a constantly changing state value at each run.
+        #
+        # Example: state.value += 1
+        if self._state["is_rerun"]:
+            self._state["is_rerun"] = False
+
+        elif self._state["hash"] is not None:
+            if self._state["hash"] != data_to_bytes:
+                self._state["is_rerun"] = True
+                self._state["session"].request_rerun(None)
+
+        self._state["hash"] = data_to_bytes
+
+
+def _get_session():
+    session_id = get_report_ctx().session_id
+    session_info = Server.get_current()._get_session_info(session_id)
+
+    if session_info is None:
+        raise RuntimeError("Couldn't get your Streamlit Session object.")
+
+    return session_info.session
+
+
+def _get_state(hash_funcs=None):
+    session = _get_session()
+
+    if not hasattr(session, "_custom_session_state"):
+        session._custom_session_state = _SessionState(session, hash_funcs)
+
+    return session._custom_session_state
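+
+
+# Minimal usage sketch (illustrative; inside a Streamlit script):
+#   state = _get_state()
+#   state.counter = (state.counter or 0) + 1  # value survives widget reruns
+#   ...  # build the rest of the page
+#   state.sync()  # call once at the end of the script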
diff --git a/promptsource/templates.py b/promptsource/templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..52425f26663f0d120b6660a94bee98a085c7cccf
--- /dev/null
+++ b/promptsource/templates.py
@@ -0,0 +1,515 @@
+import os
+import random
+import uuid
+from collections import Counter, defaultdict
+from shutil import rmtree
+from typing import Dict, List, Optional, Tuple
+
+import pandas as pd
+import pkg_resources
+import yaml
+from jinja2 import BaseLoader, Environment, meta
+
+
+# Truncation of jinja template variables
+# 1710 = 300 words x 4.7 avg characters per word + 300 spaces,
+# rounded up here to the next power of two
+TEXT_VAR_LENGTH = 2048
+
+# Local path to the folder containing the templates
+TEMPLATES_FOLDER_PATH = pkg_resources.resource_filename(__name__, "templates")
+
+env = Environment(loader=BaseLoader())
+
+# Allow the python function zip()
+env.globals.update(zip=zip)
+
+# These are users whose datasets should be included in the results returned by
+# filter_english_datasets (regardless of their metadata)
+INCLUDED_USERS = {"Zaid", "craffel"}
+
+
+def highlight(text):
+    return "<span style='color: #F08080'>" + text + "</span>"
+
+
+def choice(choices):
+    return random.choice(choices)
+
+
+def most_frequent(items):
+    """Returns the set of items which appear most frequently in the input"""
+    if not items:
+        return
+    item_counts = Counter(items).most_common()
+    max_freq = item_counts[0][1]
+    most_frequent_items = [c[0] for c in item_counts if c[1] == max_freq]
+    return most_frequent_items
+
+
+env.filters["highlight"] = highlight
+env.filters["choice"] = choice
+env.filters["most_frequent"] = most_frequent
+
+
+class Template(yaml.YAMLObject):
+    """
+    A prompt template.
+    """
+
+    yaml_tag = "!Template"
+
+    def __init__(self, name, jinja, reference, metadata=None, answer_choices=None):
+        """
+        Creates a prompt template.
+
+        A prompt template is expressed in Jinja. It is rendered using an example
+        from the corresponding Hugging Face datasets library (a dictionary). The
+        separator ||| should appear once to divide the template into prompt and
+        output. Generally, the prompt should provide information on the desired
+        behavior, e.g., text passage and instructions, and the output should be
+        a desired response.
+
+        :param name: unique name (per dataset) for template
+        :param jinja: template expressed in Jinja
+        :param reference: string describing author or paper reference for template
+        :param metadata: a Metadata object with template annotations
+        :param answer_choices: Jinja expression for answer choices. Should produce
+                               a ||| delimited string of choices that enumerates
+                               the possible completions for templates that should
+                               be evaluated as ranked completions. If None, then
+                               the template is open-ended. This list is accessible
+                               from within Jinja as the variable `answer_choices`.
+        """
+        self.id = str(uuid.uuid4())
+        self.name = name
+        self.jinja = jinja
+        self.reference = reference
+        self.metadata = metadata if metadata is not None else Template.Metadata()
+        self.answer_choices = answer_choices
+
+    def get_id(self):
+        """
+        Returns the id of the template
+
+        :return: unique id for template
+        """
+        return self.id
+
+    def get_name(self):
+        """
+        Returns the name of the template
+
+        :return: unique (per dataset) name for template
+        """
+        return self.name
+
+    def get_reference(self):
+        """
+        Returns the bibliographic reference (or author) for the template
+
+        :return: reference as a string
+        """
+        return self.reference
+
+    def get_answer_choices_expr(self):
+        """
+        Returns a Jinja expression for computing the answer choices from an example.
+
+        :return: String, or None if no answer choices
+        """
+        return self.answer_choices
+
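+    # For example (illustrative): an answer_choices expression of "Yes ||| No"
+    # yields ["Yes", "No"] for every example, while a Jinja expression such as
+    # "{{choices | join('|||')}}" would yield a per-example list.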
+    def get_answer_choices_list(self, example):
+        """
+        Returns a list of answer choices for a given example
+
+        :return: list of strings, or None if get_answer_choices_expr is None
+        """
+        jinja = self.get_answer_choices_expr()
+        if jinja is None:
+            return None
+
+        rtemplate = env.from_string(jinja)
+        protected_example = self._escape_pipe(example)
+        rendered_choices = rtemplate.render(**protected_example)
+        return [self._unescape_pipe(answer_choice.strip()) for answer_choice in rendered_choices.split("|||")]
+
+    def get_fixed_answer_choices_list(self):
+        """
+        Returns a list of answer choices that is static across examples, if possible
+
+        :return: list of strings, or None if no static list exists
+        """
+        jinja = self.get_answer_choices_expr()
+        if jinja is None:
+            return None
+
+        parse = env.parse(jinja)
+        variables = meta.find_undeclared_variables(parse)
+        if len(variables) == 0:
+            rtemplate = env.from_string(jinja)
+            rendered_choices = rtemplate.render()
+            return [answer_choice.strip() for answer_choice in rendered_choices.split("|||")]
+        else:
+            return None
+
+    def apply(self, example, truncate=True, highlight_variables=False):
+        """
+        Creates a prompt by applying this template to an example
+
+        :param example: the dataset example to create a prompt for
+        :param truncate: if True, example fields will be truncated to TEXT_VAR_LENGTH chars
+        :param highlight_variables: highlight the added variables
+        :return: tuple of 2 strings, for prompt and output
+        """
+        jinja = self.jinja
+
+        # Truncates the prompt if needed
+        if truncate:
+            trunc_command = (
+                f" | string | truncate({TEXT_VAR_LENGTH}) }}}}"  # Escaping curly braces requires doubling them
+            )
+            jinja = jinja.replace("}}", trunc_command)
+
+        # Highlights text that was substituted for variables, if requested
+        if highlight_variables:
+            jinja = jinja.replace("}}", " | highlight }}")
+        rtemplate = env.from_string(jinja)
+
+        protected_example = self._escape_pipe(example)
+
+        # Adds in answer_choices variable
+        if "answer_choices" in protected_example:
+            raise ValueError("Example contains the restricted key 'answer_choices'.")
+
+        protected_example["answer_choices"] = self.get_answer_choices_list(example)
+
+        # Renders the Jinja template
+        rendered_example = rtemplate.render(**protected_example)
+
+        # Splits on the separator, and then replaces back any occurrences of the
+        # separator in the original example
+        return [self._unescape_pipe(part).strip() for part in rendered_example.split("|||")]
+
+    pipe_protector = "3ed2dface8203c4c9dfb1a5dc58e41e0"
+
+    @classmethod
+    def _escape_pipe(cls, example):
+        # Replaces any occurrences of the "|||" separator in the example;
+        # they will be restored after splitting
+        protected_example = {
+            key: value.replace("|||", cls.pipe_protector) if isinstance(value, str) else value
+            for key, value in example.items()
+        }
+        return protected_example
+
+    @classmethod
+    def _unescape_pipe(cls, string):
+        # replaces back any occurrences of the separator in a string
+        return string.replace(cls.pipe_protector, "|||")
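+
+    # Illustrative round trip: an example field containing "a ||| b" is stored
+    # as "a <pipe_protector> b" before rendering, so the split("|||") in
+    # apply() only cuts at the template's own separator; _unescape_pipe then
+    # restores the original text in each part.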
+
+    class Metadata(yaml.YAMLObject):
+        """
+        Metadata for a prompt template.
+        """
+
+        yaml_tag = "!TemplateMetadata"
+
+        def __init__(
+            self,
+            original_task: Optional[bool] = None,
+            choices_in_prompt: Optional[bool] = None,
+            metrics: Optional[List[str]] = None,
+        ):
+            """
+            Initializes template metadata.
+
+            In the following, trivial choices are defined as Yes/No, True/False,
+            etc. and nontrivial choices are other types of choices denoted in
+            the answer_choices field.
+
+            :param original_task: If True, this prompt asks a model to perform the original task designed for
+                this dataset.
+            :param choices_in_prompt: If True, the answer choices are included in the templates such that models
+                see those choices in the input. Only applicable to classification tasks.
+            :param metrics: List of strings denoting metrics to use for evaluation
+            """
+            self.original_task = original_task
+            self.choices_in_prompt = choices_in_prompt
+            self.metrics = metrics
+
+
+class TemplateCollection:
+    """
+    This helper class wraps the DatasetTemplates class:
+    - Initializes a DatasetTemplates for every existing template folder
+    - Gives access to each DatasetTemplates
+    - Provides aggregated counts over all DatasetTemplates
+    """
+
+    def __init__(self):
+
+        # Dict of all the DatasetTemplates, key is the tuple (dataset_name, subset_name)
+        self.datasets_templates: Dict[Tuple[str, Optional[str]], DatasetTemplates] = self._collect_datasets()
+
+    @property
+    def keys(self):
+        return list(self.datasets_templates.keys())
+
+    def __len__(self) -> int:
+        return len(self.datasets_templates)
+
+    def remove(self, dataset_name: str, subset_name: Optional[str] = None) -> None:
+        del self.datasets_templates[dataset_name, subset_name]
+
+    def _collect_datasets(self) -> Dict[Tuple[str, Optional[str]], "DatasetTemplates"]:
+        """
+        Initialize a DatasetTemplates object for each templates.yaml detected in the templates folder
+
+        Returns: a dict with key=(dataset_name, subset_name)
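+
+        e.g. (illustrative):
+          {("acronym_identification", None): <DatasetTemplates>,
+           ("ade_corpus_v2", "Ade_corpus_v2_classification"): <DatasetTemplates>}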
+        """
+        dataset_folders = os.listdir(TEMPLATES_FOLDER_PATH)
+        dataset_folders = [folder for folder in dataset_folders if not folder.startswith(".")]
+
+        output = {}  # format is {(dataset_name, subset_name): DatasetsTemplates}
+        for dataset in dataset_folders:
+            if dataset in INCLUDED_USERS:
+                for filename in os.listdir(os.path.join(TEMPLATES_FOLDER_PATH, dataset)):
+                    output = {**output, **self._collect_dataset(dataset + "/" + filename)}
+            else:
+                output = {**output, **self._collect_dataset(dataset)}
+        return output
+
+    def _collect_dataset(self, dataset):
+        output = {}  # format is {(dataset_name, subset_name): DatasetsTemplates}
+        for filename in os.listdir(os.path.join(TEMPLATES_FOLDER_PATH, dataset)):
+            if filename.endswith(".yaml"):
+                # If there is no sub-folder, there is no subset for this dataset
+                output[(dataset, None)] = DatasetTemplates(dataset)
+            else:
+                # This is a subfolder, and its name corresponds to the subset name
+                output[(dataset, filename)] = DatasetTemplates(dataset_name=dataset, subset_name=filename)
+        return output
+
+    def get_dataset(self, dataset_name: str, subset_name: Optional[str] = None) -> "DatasetTemplates":
+        """
+        Return the DatasetTemplates object corresponding to the dataset name
+
+        :param dataset_name: name of the dataset to get
+        :param subset_name: name of the subset
+        """
+        # if the (dataset, subset) pair does not exist yet, we add it
+        if (dataset_name, subset_name) not in self.keys:
+            self.datasets_templates[(dataset_name, subset_name)] = DatasetTemplates(dataset_name, subset_name)
+
+        return self.datasets_templates[(dataset_name, subset_name)]
+
+    def get_templates_count(self) -> Dict:
+        """
+        Return the number of templates per dataset
+
+        NB: we don't break down datasets into subsets for the count, i.e. subset counts
+        are included in the dataset count
+        """
+
+        count_dict = defaultdict(int)
+        for k, v in self.datasets_templates.items():
+            # Subsets count towards dataset count
+            count_dict[k[0]] += len(v)
+        # converting to regular dict
+        return dict(count_dict)
+
+
+class DatasetTemplates:
+    """
+    Class that wraps all templates for a specific dataset/subset and implements all the helper
+    functions necessary to read/write to the yaml file
+    """
+
+    TEMPLATES_KEY = "templates"
+    DATASET_KEY = "dataset"
+    SUBSET_KEY = "subset"
+    TEMPLATE_FILENAME = "templates.yaml"
+
+    def __init__(self, dataset_name: str, subset_name: Optional[str] = None):
+        self.dataset_name: str = dataset_name
+        self.subset_name: str = subset_name
+        # dictionary is keyed by template name.
+        self.templates: Dict = self.read_from_file()
+
+        # Mapping from template name to template id
+        self.name_to_id_mapping = {}
+        self.sync_mapping()
+
+    def sync_mapping(self) -> None:
+        """
+        Re-compute the name_to_id_mapping to ensure it is in sync with self.templates
+        """
+        self.name_to_id_mapping = {template.name: template.id for template in self.templates.values()}
+
+    @property
+    def all_template_names(self) -> List[str]:
+        """
+        Sorted list of all template names for this dataset
+        """
+        return sorted([template.name for template in self.templates.values()])
+
+    @property
+    def folder_path(self) -> str:
+        if self.subset_name:
+            return os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name, self.subset_name)
+        else:
+            return os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name)
+
+    @property
+    def yaml_path(self) -> str:
+        return os.path.join(self.folder_path, self.TEMPLATE_FILENAME)
+
+    def format_for_dump(self) -> Dict:
+        """
+        Create a formatted dictionary for the class attributes
+        """
+        formatted_dict = {self.DATASET_KEY: self.dataset_name, self.TEMPLATES_KEY: self.templates}
+        if self.subset_name:
+            formatted_dict[self.SUBSET_KEY] = self.subset_name
+        return formatted_dict
+
+    def read_from_file(self) -> Dict:
+        """
+        Reads a file containing a prompt collection.
+        """
+
+        if not os.path.exists(self.yaml_path):
+            return {}
+        with open(self.yaml_path, "r") as f:
+            yaml_dict = yaml.load(f, Loader=yaml.FullLoader)
+        return yaml_dict[self.TEMPLATES_KEY]
+
+    def write_to_file(self) -> None:
+        """
+        Writes to a file with the current prompt collection.
+        """
+        # Sync the mapping
+        self.sync_mapping()
+
+        # We only create the folder if a template is written
+        if not os.path.exists(self.folder_path):
+            os.makedirs(self.folder_path)
+        with open(self.yaml_path, "w") as f:
+            yaml.dump(self.format_for_dump(), f)
+
+    def add_template(self, template: "Template") -> None:
+        """
+        Adds a new template for the dataset
+
+        :param template: template
+        """
+        self.templates[template.get_id()] = template
+
+        self.write_to_file()
+
+    def remove_template(self, template_name: str) -> None:
+        """
+        Deletes a template
+
+        :param template_name: name of template to remove
+        """
+
+        # Templates are keyed by id, so first check that the requested name exists
+        if template_name not in self.all_template_names:
+            raise ValueError(f"No template with name {template_name} for dataset {self.dataset_name} exists.")
+
+        del self.templates[self.name_to_id_mapping[template_name]]
+
+        if len(self.templates) == 0:
+            # There is no remaining template, we can remove the entire folder
+            self.delete_folder()
+        else:
+            # We just update the file
+            self.write_to_file()
+
+    def update_template(
+        self,
+        current_template_name: str,
+        new_template_name: str,
+        jinja: str,
+        reference: str,
+        metadata: Template.Metadata,
+        answer_choices: str,
+    ) -> None:
+        """
+        Updates a pre-existing template and writes changes
+
+        :param current_template_name: current name of the template stored in self.templates
+        :param new_template_name: new name for the template
+        :param jinja: new jinja entry
+        :param reference: new reference entry
+        :param metadata: a Metadata object with template annotations
+        :param answer_choices: new answer_choices string
+        """
+        template_id = self.name_to_id_mapping[current_template_name]
+        self.templates[template_id].name = new_template_name
+        self.templates[template_id].jinja = jinja
+        self.templates[template_id].reference = reference
+        self.templates[template_id].metadata = metadata
+        self.templates[template_id].answer_choices = answer_choices
+
+        self.write_to_file()
+
+    def delete_folder(self) -> None:
+        """
+        Delete the folder corresponding to self.folder_path
+        """
+        self.sync_mapping()
+
+        rmtree(self.folder_path)
+
+        # If it is a subset, we have to check whether to remove the dataset folder
+        if self.subset_name:
+            # remove the dataset folder only if no other subset folders remain
+            base_dataset_folder = os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name)
+            if len(os.listdir(base_dataset_folder)) == 0:
+                rmtree(base_dataset_folder)
+
+    def __getitem__(self, template_key: str) -> "Template":
+        return self.templates[self.name_to_id_mapping[template_key]]
+
+    def __len__(self) -> int:
+        return len(self.templates)
+
+
+def get_templates_data_frame():
+    """
+    Gathers all template information into a Pandas DataFrame.
+
+    :return: Pandas DataFrame
+    """
+    data = {
+        "id": [],
+        "dataset": [],
+        "subset": [],
+        "name": [],
+        "reference": [],
+        "original_task": [],
+        "choices_in_prompt": [],
+        "metrics": [],
+        "answer_choices": [],
+        "jinja": [],
+    }
+
+    template_collection = TemplateCollection()
+
+    for key in template_collection.keys:
+        templates = template_collection.get_dataset(key[0], key[1])
+        for template_name in templates.all_template_names:
+            template = templates[template_name]
+            data["id"].append(template.get_id())
+            data["dataset"].append(key[0])
+            data["subset"].append(key[1])
+            data["name"].append(template.get_name())
+            data["reference"].append(template.get_reference())
+            data["original_task"].append(template.metadata.original_task)
+            data["choices_in_prompt"].append(template.metadata.choices_in_prompt)
+            data["metrics"].append(template.metadata.metrics)
+            data["answer_choices"].append(template.get_answer_choices_expr())
+            data["jinja"].append(template.jinja)
+
+    return pd.DataFrame(data)
diff --git a/promptsource/templates/Zaid/coqa_expanded/templates.yaml b/promptsource/templates/Zaid/coqa_expanded/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..27c09adb25351aeba188c46e0eb653761d2497d8
--- /dev/null
+++ b/promptsource/templates/Zaid/coqa_expanded/templates.yaml
@@ -0,0 +1,116 @@
+dataset: Zaid/coqa_expanded
+templates:
+  12ad4331-d063-4b56-b0f6-76f59c690717: !Template
+    answer_choices: null
+    id: 12ad4331-d063-4b56-b0f6-76f59c690717
+    jinja: "Below is a passage, followed by a series of questions and answers about\
+      \ the passage. Answer the last question based on the information contained in\
+      \ the passage. If there is no answer in the passage, say \"unknown\".\n\nPassage:\
+      \ {{story}}\n\nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1\
+      \ %}\n{{answer[\"input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Verbose instructions
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  2f9fb20d-f4c9-4371-9cd4-db47607cb7a3: !Template
+    answer_choices: null
+    id: 2f9fb20d-f4c9-4371-9cd4-db47607cb7a3
+    jinja: "What is the answer to the last question in the dialogue below? If there\
+      \ is no answer in the passage, say \"unknown\".\n\nPassage: {{story}}\n\nQ:\
+      \ {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"\
+      input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: What is the answer
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  9aff8967-d41c-4d79-8ef4-fc3650773735: !Template
+    answer_choices: null
+    id: 9aff8967-d41c-4d79-8ef4-fc3650773735
+    jinja: "Complete the dialogue based on the information contained in the passage.\
+      \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{story}}\n\
+      \nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"\
+      input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Complete the dialogue
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  9bc32f2e-eee6-4006-bce3-74a79403d33e: !Template
+    answer_choices: null
+    id: 9bc32f2e-eee6-4006-bce3-74a79403d33e
+    jinja: "Answer the last question based on the information contained in the passage.\
+      \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{story}}\n\
+      \nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"\
+      input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Answer the last question
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  bacb6534-e607-4afc-a412-ccfcd9fe38e2: !Template
+    answer_choices: null
+    id: bacb6534-e607-4afc-a412-ccfcd9fe38e2
+    jinja: 'In the passage below, extract the part which answers the last question.
+      If there is no answer in the passage, say "unknown".
+
+
+      Passage: {{story}}
+
+
+      Q: {{question}}
+
+      A: |||
+
+      {% if answer["answer_start"] != -1 %}
+
+      {{story[answer["answer_start"] : answer["answer_end"] ]}}
+
+      {% else %}
+
+      unknown
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: extract_answer
+    reference: ''
+  be39974f-aa86-4076-b444-bd3c2732b17b: !Template
+    answer_choices: null
+    id: be39974f-aa86-4076-b444-bd3c2732b17b
+    jinja: "Help me complete the dialogue about this passage. If there is no answer\
+      \ in the passage, say \"unknown\".\n\nPassage: {{story}}\n\nQ: {{question}}\
+      \ \nA: ||| {% if answer[\"answer_start\"] != -1 %}\n{{answer[\"input_text\"\
+      ]}}\n{% else %}\nunknown\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Help me
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  d95440ce-d538-40f8-ae09-664e05852ca8: !Template
+    answer_choices: null
+    id: d95440ce-d538-40f8-ae09-664e05852ca8
+    jinja: "{{story}}\n\nQ: {{question}} \nA: ||| {% if answer[\"answer_start\"] !=\
+      \ -1 %}\n{{answer[\"input_text\"]}}\n{% else %}\nunknown\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: GPT-3 Style
+    reference: 'Brown et al. NeurIPS 2020. Metric: variant of SQuAD (Section 6.1 of
+      the paper)'
diff --git a/promptsource/templates/Zaid/quac_expanded/templates.yaml b/promptsource/templates/Zaid/quac_expanded/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c46ca7958780507b6aa680fc97492d699c9f5d2b
--- /dev/null
+++ b/promptsource/templates/Zaid/quac_expanded/templates.yaml
@@ -0,0 +1,79 @@
+dataset: Zaid/quac_expanded
+templates:
+  01d8c949-89a7-4a44-9a39-6cf2ac3e0a7b: !Template
+    answer_choices: null
+    id: 01d8c949-89a7-4a44-9a39-6cf2ac3e0a7b
+    jinja: "What is the answer to the last question in the dialogue below? If there\
+      \ is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\n\
+      Q: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: What is the answer
+    reference: 'Metric: F1'
+  1484c6e6-bf42-47ca-9ea7-c3c552a24de1: !Template
+    answer_choices: null
+    id: 1484c6e6-bf42-47ca-9ea7-c3c552a24de1
+    jinja: "{{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: GPT-3 Style
+    reference: 'Brown et al. NeurIPS 2020. Metric: F1'
+  2bca0532-01a3-4a64-a228-a57ae0965719: !Template
+    answer_choices: null
+    id: 2bca0532-01a3-4a64-a228-a57ae0965719
+    jinja: "Below is a passage, followed by a series of questions and answers about\
+      \ the passage. Answer the last question based on the information contained in\
+      \ the passage. If there is no answer in the passage, say \"unknown\".\n\nPassage:\
+      \ {{context}}\n\nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Verbose instructions
+    reference: 'Metric: F1'
+  4abd0379-dbc0-4f71-901b-dd0af3581157: !Template
+    answer_choices: null
+    id: 4abd0379-dbc0-4f71-901b-dd0af3581157
+    jinja: "Answer the last question based on the information contained in the passage.\
+      \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\
+      \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Answer the last question
+    reference: 'Metric: F1'
+  8ebbd098-b40c-4e69-8cbb-0ffecf0fe2a6: !Template
+    answer_choices: null
+    id: 8ebbd098-b40c-4e69-8cbb-0ffecf0fe2a6
+    jinja: "Complete the dialogue based on the information contained in the passage.\
+      \ If there is no answer in the passage, say \"unknown\".\n\nPassage: {{context}}\n\
+      \nQ: {{question}} \nA: ||| {{answer[\"texts\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Complete the dialogue
+    reference: 'Metric: F1'
+  e624695b-5d26-47cc-bdb4-ac2bee4ddaea: !Template
+    answer_choices: null
+    id: e624695b-5d26-47cc-bdb4-ac2bee4ddaea
+    jinja: "Help me complete the dialogue about this passage. If there is no answer\
+      \ in the passage, say \"unknown\".\n\nPassage: {{context}}\n\nQ: {{question}}\
+      \ \nA: ||| {{answer[\"texts\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Help me
+    reference: 'Metric: F1'
diff --git a/promptsource/templates/acronym_identification/templates.yaml b/promptsource/templates/acronym_identification/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b5cb1c2008ba11557cb99ff7aa3ab0d28e9750ad
--- /dev/null
+++ b/promptsource/templates/acronym_identification/templates.yaml
@@ -0,0 +1,219 @@
+dataset: acronym_identification
+templates:
+  64f438f2-9968-459f-82d2-24bad632b358: !Template
+    answer_choices: null
+    id: 64f438f2-9968-459f-82d2-24bad632b358
+    jinja: "{% set random_abbr = '' %}\n{% set _dummy = none %}\n{% set abbr_exp_dict\
+      \ = namespace(value = {}) %}\n{% set abbr_string=namespace(value='') %}\n{%\
+      \ set exp_string=namespace(value='')%}\n \n{% for label_idx in range(labels|length)\
+      \ %}\n  {% if labels[label_idx] == 0 %}{# Long Beginning #}\n    {% set exp_string.value\
+      \ = tokens[label_idx] %}{# Create new long string #}\n  {% elif labels[label_idx]\
+      \ == 1 %}{# Short Beginning #}\n    {% if abbr_string.value!='' and abbr_string.value\
+      \ not in abbr_exp_dict.value.keys()%}{# Some string already present #}\n   \
+      \   {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:''}) %}{#\
+      \ Discard this string as a new short string is coming #}\n    {% endif %}\n\
+      \    {% set abbr_string.value = tokens[label_idx] %}{# Create new short string\
+      \ #}\n  {% elif labels[label_idx] == 2 %}{# Long Intermediate #}\n    {% set\
+      \ exp_string.value = exp_string.value+' '+tokens[label_idx] %}{# Update existing\
+      \ string #}\n  {% elif labels[label_idx] == 3 %}{# Short Intermediate #}\n \
+      \   {% set abbr_string.value = abbr_string.value+tokens[label_idx] %}{# Update\
+      \ existing string #}\n  {% else %}{# Other #}\n    {# Both non-empty, and first\
+      \ characters match #}\n    {% if abbr_string.value!='' and exp_string.value!=''\
+      \ and exp_string.value.split()[0][0]|lower in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower\
+      \ in abbr_string.value|lower%}\n      {# Update both the dictionaries #}\n \
+      \     {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+      \ %}\n      {# Empty both the strings #}\n      {% set abbr_string.value= ''\
+      \ %}\n      {% set exp_string.value= '' %}\n    {% endif %}\n  {% endif %}\n\
+      {% endfor %}\n{# Both non-empty, and first characters match #}\n{% if abbr_string.value!=''\
+      \ and exp_string.value!='' %}\n  {% if exp_string.value.split()[0][0]|lower\
+      \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower\
+      \ %}\n    {# Update both the dictionaries #}\n    {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+      \ %}\n  {% elif abbr_exp_dict.value.items()|length==0 %}\n    {% set _dummy\
+      \ = abbr_exp_dict.value.update({abbr_string.value:exp_string.value}) %}\n  {%\
+      \ endif %}\n{% else %}\n  {% if abbr_string.value!=''%}\n    {% if abbr_string.value\
+      \ not in abbr_exp_dict.value.keys() %}\n      {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:''})\
+      \ %}\n    {% endif %}\n  {% endif %}\n{% endif %}\n{% if abbr_exp_dict.value\
+      \ %}\n{% set random_abbr = abbr_exp_dict.value.keys()|list|choice %}\nGiven\
+      \ the following tokens, find the expansion of {{random_abbr}}. Return {{\"Unclear\"\
+      }} if the expansion can't be found.\n \n{{tokens|join(' ')}}\n|||\n{% if random_abbr\
+      \ in abbr_exp_dict.value.keys() and abbr_exp_dict.value[random_abbr]!='' %}\n\
+      {{abbr_exp_dict.value[random_abbr]}}\n{% else %}\nUnclear\n{% endif %}\n{% endif\
+      \ %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find_expansion
+    reference: Given the tokens, find the expansion of an abbreviation in the tokens.
+  81babc83-18cd-4eed-a343-8ede56b21df5: !Template
+    answer_choices: null
+    id: 81babc83-18cd-4eed-a343-8ede56b21df5
+    jinja: "Given the BIO encoding as follows:  \"{{\"B-short\"}}\" and \"{{\"I-short\"\
+      }}\" represent the beginning and intermediate tokens for abbreviations.\"{{\"\
+      B-long\"}}\" and \"{{\"I-long\"}}\" represent the beginning and intermediate\
+      \ tokens for expansions of the abbreviations. All other tokens are represented\
+      \ by \"{{\"O\"}}\". \nGenerate comma-separated BIO encoding for the following\
+      \ comma-separated tokens: \n\n{{tokens|join(', ')}}\n|||\n{% for label in labels\
+      \ %}{{[\"B-long\", \"B-short\", \"I-long\",  \"I-short\", \"O\"][label]}}{%\
+      \ if not loop.last %},{%endif %}{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: bio_encode
+    reference: Given the comma separated tokens, generate BIO encoding for abbreviations.
+  8832e5f7-7c45-46da-b85f-71fcb444f264: !Template
+    answer_choices: null
+    id: 8832e5f7-7c45-46da-b85f-71fcb444f264
+    jinja: 'List all the expansions of the acronyms present in the following comma-separated
+      tokens. Return {{"No expansions found"}} if the expansions can''t be found.
+
+      {{tokens|join('', '')}}
+
+      |||
+
+      {% set abbr_string=namespace(value='''') %}
+
+      {% set answer_list=namespace(value=[]) %}
+
+      {% for label_idx in range(labels|length) %}
+
+      {% if labels[label_idx] == 0 %}
+
+      {% set abbr_string.value = tokens[label_idx] %}
+
+      {% elif abbr_string.value!='''' and labels[label_idx]==2%}
+
+      {% set abbr_string.value = abbr_string.value+'' ''+tokens[label_idx] %}
+
+      {% elif abbr_string.value!='''' and labels[label_idx]!=2%}
+
+      {% set answer_list.value = answer_list.value +[abbr_string.value] %}
+
+      {% set abbr_string.value = '''' %}
+
+      {% endif %}
+
+      {% if loop.last and abbr_string.value!='''' %}
+
+      {% set answer_list.value = answer_list.value +[abbr_string.value] %}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% if answer_list.value|length!=0 %}
+
+      {{ answer_list.value|join('', '') }}
+
+      {% else %}
+
+      No expansions found.
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: list_expansions
+    reference: Given the tokens, list the expansion tokens.
+  cae58242-cde9-472d-ae9e-56fc7e79c0d1: !Template
+    answer_choices: null
+    id: cae58242-cde9-472d-ae9e-56fc7e79c0d1
+    jinja: "List all the acryonyms in the following comma-separated tokens: \n\n{{tokens|join(',\
+      \ ')}}\n|||\n{% set abbr_string=namespace(value='') %}\n{% set answer_list=namespace(value=[])\
+      \ %}\n{% for label_idx in range(labels|length) %}\n{% if labels[label_idx] ==\
+      \ 1 %}\n{% set abbr_string.value = tokens[label_idx] %}\n{% elif abbr_string.value!=''\
+      \ and labels[label_idx]==3%}\n{% set abbr_string.value = abbr_string.value+tokens[label_idx]\
+      \ %}\n{% elif abbr_string.value!='' and labels[label_idx]!=3 %}\n{% set answer_list.value\
+      \ = answer_list.value +[abbr_string.value] %}\n{% set abbr_string.value = ''\
+      \ %}\n{% endif %}\n{% if loop.last and abbr_string.value!='' %}\n{% set answer_list.value\
+      \ = answer_list.value +[abbr_string.value] %}\n{% endif %}\n{% endfor %}\n{{\
+      \ answer_list.value|join(', ') }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: list_abbreviations
+    reference: Given the tokens, list the abbreviations.
+  e4e42433-0e37-4aa5-bbce-7f336ecac6a3: !Template
+    answer_choices: null
+    id: e4e42433-0e37-4aa5-bbce-7f336ecac6a3
+    jinja: "{% set _dummy = none %}\n{% set abbr_exp_dict = namespace(value = {})\
+      \ %}\n{% set abbr_string=namespace(value='') %}\n{% set exp_string=namespace(value='')%}\n\
+      \ \n{% for label_idx in range(labels|length) %}\n  {% if labels[label_idx] ==\
+      \ 0 %}{# Long Beginning #}\n    {% set exp_string.value = tokens[label_idx]\
+      \ %}{# Create new long string #}\n  {% elif labels[label_idx] == 1 %}{# Short\
+      \ Beginning #}\n    {% if abbr_string.value!='' and abbr_string.value not in\
+      \ abbr_exp_dict.value.keys()%}{# Some string already present #}\n      {% set\
+      \ _dummy = abbr_exp_dict.value.update({abbr_string.value:''}) %}{# Discard this\
+      \ string as a new short string is coming #}\n    {% endif %}\n    {% set abbr_string.value\
+      \ = tokens[label_idx] %}{# Create new short string #}\n  {% elif labels[label_idx]\
+      \ == 2 %}{# Long Intermediate #}\n    {% set exp_string.value = exp_string.value+'\
+      \ '+tokens[label_idx] %}{# Update existing string #}\n  {% elif labels[label_idx]\
+      \ == 3 %}{# Short Intermediate #}\n    {% set abbr_string.value = abbr_string.value+tokens[label_idx]\
+      \ %}{# Update existing string #}\n  {% else %}{# Other #}\n    {# Both non-empty,\
+      \ and first characters match #}\n    {% if abbr_string.value!='' and exp_string.value!=''\
+      \ and exp_string.value.split()[0][0]|lower in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower\
+      \ in abbr_string.value|lower%}\n      {# Update both the dictionaries #}\n \
+      \     {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+      \ %}\n      {# Empty both the strings #}\n      {% set abbr_string.value= ''\
+      \ %}\n      {% set exp_string.value= '' %}\n    {% endif %}\n  {% endif %}\n\
+      {% endfor %}\n{# Both non-empty, and first characters match #}\n{% if abbr_string.value!=''\
+      \ and exp_string.value!='' %}\n  {% if exp_string.value.split()[0][0]|lower\
+      \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower\
+      \ %}\n    {# Update both the dictionaries #}\n    {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:exp_string.value})\
+      \ %}\n  {% elif abbr_exp_dict.value.items()|length==0 %}\n    {% set _dummy\
+      \ = abbr_exp_dict.value.update({abbr_string.value:exp_string.value}) %}\n  {%\
+      \ endif %}\n{% else %}\n  {% if abbr_string.value!=''%}\n    {% if abbr_string.value\
+      \ not in abbr_exp_dict.value.keys() %}\n      {% set _dummy = abbr_exp_dict.value.update({abbr_string.value:''})\
+      \ %}\n    {% endif %}\n  {% endif %}\n{% endif %}\n \nGiven the following tokens,\
+      \ find the abbreviations and their expansions. Return {{\"Unclear\"}} if the\
+      \ expansion can't be found.\n \n{{tokens|join(' ')}}\n|||\n{% for item, value\
+      \ in abbr_exp_dict.value.items() %}\n{{item}} : {% if value!='' %}{{value}}{%\
+      \ else %}Unclear{% endif %}\n{%endfor%}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find_mapping
+    reference: Given the tokens, find the abbreviation mapping.
+  eed32ee4-ebc3-499f-ba61-e91461f56ccb: !Template
+    answer_choices: null
+    id: eed32ee4-ebc3-499f-ba61-e91461f56ccb
+    jinja: "{% set random_exp = '' %}{% set _dummy = none %}{% set exp_abbr_dict =\
+      \ namespace(value = {}) %}{% set abbr_string=namespace(value='') %}{% set exp_string=namespace(value='')%}{%\
+      \ for label_idx in range(labels|length) %}{% if labels[label_idx] == 0 %}{#\
+      \ Long Beginning #}{% if exp_string.value!='' and exp_string.value not in exp_abbr_dict.value.keys()\
+      \ %}{# Some string already present #}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:''})\
+      \ %}{# Discard this string as a new long string is coming #} {% endif %}{% set\
+      \ exp_string.value = tokens[label_idx] %}{# Create new long string #}{% elif\
+      \ labels[label_idx] == 1 %}{# Short Beginning #}{% set abbr_string.value = tokens[label_idx]\
+      \ %}{# Create new short string #}{% elif labels[label_idx] == 2 %}{# Long Intermediate\
+      \ #}{% set exp_string.value = exp_string.value+' '+tokens[label_idx] %}{# Update\
+      \ existing string #}{% elif labels[label_idx] == 3 %}{# Short Intermediate #}{%\
+      \ set abbr_string.value = abbr_string.value+tokens[label_idx] %}{# Update existing\
+      \ string #}{% else %}{# Other #}{# Both non-empty, and first characters match\
+      \ #}{% if abbr_string.value!='' and exp_string.value!='' and exp_string.value.split()[0][0]|lower\
+      \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower%}{#\
+      \ Update both the dictionaries #}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:abbr_string.value})\
+      \ %}{# Empty both the strings #}{% set abbr_string.value= '' %}{% set exp_string.value=\
+      \ '' %}{% endif %}{% endif %}{% endfor %}{# Both non-empty, and first characters\
+      \ match #}{% if abbr_string.value!='' and exp_string.value!='' %}{% if exp_string.value.split()[0][0]|lower\
+      \ in abbr_string.value|lower and exp_string.value.split()[-1][0]|lower in abbr_string.value|lower\
+      \ %}{# Update the dictionary #}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:abbr_string.value})\
+      \ %}{% elif exp_abbr_dict.value.items()|length==0 %}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:abbr_string.value})\
+      \ %}{% endif %}{% else %}{% if exp_string.value!='' %}{% if exp_string.value\
+      \ not in exp_abbr_dict.value.keys() %}{% set _dummy = exp_abbr_dict.value.update({exp_string.value:''})\
+      \ %}{% endif %}{% endif %}{% endif %}{% if exp_abbr_dict.value.items()|length!=0\
+      \ %}{% set random_exp = exp_abbr_dict.value.keys()|list|choice %}Given the following\
+      \ tokens, find the abbreviation for: {{random_exp}}. Return \"Unclear\" if the\
+      \ abbreviation can't be found.\n \n{{tokens|join(' ')}}|||{% if random_exp in\
+      \ exp_abbr_dict.value.keys() and exp_abbr_dict.value[random_exp]!='' %}{{exp_abbr_dict.value[random_exp]}}{%\
+      \ else %}Unclear{% endif %}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find_abbreviation
+    reference: Given the tokens, find the abbreviation for an expansion.
diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e848a96bed07c86a5d3704d0c1901d00ddb1278a
--- /dev/null
+++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_classification/templates.yaml
@@ -0,0 +1,39 @@
+dataset: ade_corpus_v2
+subset: Ade_corpus_v2_classification
+templates:
+  56bd12a8-b8ee-464e-98cc-5f586ba9f74d: !Template
+    answer_choices: Not-Related ||| Related
+    id: 56bd12a8-b8ee-464e-98cc-5f586ba9f74d
+    jinja: Is "{{text}}" related to adverse drug effect (ADE)? ||| {{answer_choices[label]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: baseline
+    reference: ''
+  78c4ce65-dd66-46ed-878d-11f4eca5e544: !Template
+    answer_choices: Yes, it is related to adverse drug effect. ||| No, it is not related
+      to adverse drug effect.
+    id: 78c4ce65-dd66-46ed-878d-11f4eca5e544
+    jinja: "Read the below text and answer the question.\n\nText: {{text}} \n\nQuestion:\
+      \ Is the above text related to adverse drug effect (ADE)?\n\nA. Yes, it is related\
+      \ to adverse drug effect.\n\nB. No, it is not related to adverse drug effect.\n\
+      |||\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: verbose
+    reference: ''
+  dabc0337-5bd3-4150-98b3-794a15ce1a3a: !Template
+    answer_choices: null
+    id: dabc0337-5bd3-4150-98b3-794a15ce1a3a
+    jinja: "{% if label==1 %}\nWrite a medical report that is related to adverse drug\
+      \ effect (ADE). \n{% else %}\nWrite a medical report that is not related to\
+      \ adverse drug effect (ADE). \n{% endif %}\n|||\n{{text}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: label-to-text
+    reference: ''
diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67a82d71ddf578f118a92cf20fbf901a366ce0ee
--- /dev/null
+++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_ade_relation/templates.yaml
@@ -0,0 +1,89 @@
+dataset: ade_corpus_v2
+subset: Ade_corpus_v2_drug_ade_relation
+templates:
+  0ec35408-652d-4ebc-9478-5a0d330c24c8: !Template
+    answer_choices: null
+    id: 0ec35408-652d-4ebc-9478-5a0d330c24c8
+    jinja: 'What drug has an effect of {{effect}}?
+
+      |||
+
+      {{drug}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: effect2drug
+    reference: ''
+  2682a789-a435-4976-b34f-f376991c842a: !Template
+    answer_choices: null
+    id: 2682a789-a435-4976-b34f-f376991c842a
+    jinja: '{{drug}} has an effect of {{effect}}. Create a sentence using this drug
+      and its effect.
+
+      |||
+
+      {{text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: drug-and-effect-to-text
+    reference: ''
+  61ba3622-72bc-4fd8-acfc-826bc2a93aa5: !Template
+    answer_choices: null
+    id: 61ba3622-72bc-4fd8-acfc-826bc2a93aa5
+    jinja: 'What effect does {{drug}} have?
+
+      |||
+
+      {{effect}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: drug2effect
+    reference: ''
+  6acf3588-baa1-4ff6-87c4-4c2356855464: !Template
+    answer_choices: null
+    id: 6acf3588-baa1-4ff6-87c4-4c2356855464
+    jinja: 'Read the below text and answer the question.
+
+
+      Text: {{text}}
+
+
+      Question: What are the drug and its effect in the above text, respectively?
+
+      |||
+
+      {{drug}} and {{effect}}, respectively.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: baseline
+    reference: ''
+  db68e609-ba92-40ae-b161-8b7710124142: !Template
+    answer_choices: null
+    id: db68e609-ba92-40ae-b161-8b7710124142
+    jinja: 'Read the below text and answer the two following questions.
+
+
+      Text: {{text}}
+
+
+      Question 1: What is the drug in the above text?
+
+
+      Question 2: What is the effect of it?
+
+      |||
+
+      The drug is {{drug}} and its effect is {{effect}}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: two-questions
+    reference: ''
diff --git a/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..61bdecee3c02e46a30385d9cd7f73ae9e1b939ab
--- /dev/null
+++ b/promptsource/templates/ade_corpus_v2/Ade_corpus_v2_drug_dosage_relation/templates.yaml
@@ -0,0 +1,82 @@
+dataset: ade_corpus_v2
+subset: Ade_corpus_v2_drug_dosage_relation
+templates:
+  1de6d411-ed0a-4d48-806e-cad009f07a65: !Template
+    answer_choices: null
+    id: 1de6d411-ed0a-4d48-806e-cad009f07a65
+    jinja: 'What drug has a dosage of {{dosage}}?
+
+      |||
+
+      {{drug}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: dosage2drug
+    reference: ''
+  1e719388-59c9-4b0a-9ed9-dd02b6ddd0a6: !Template
+    answer_choices: null
+    id: 1e719388-59c9-4b0a-9ed9-dd02b6ddd0a6
+    jinja: '{{dosage}} of {{drug}} was given to a patient. What kind of symptom did
+      this patient have?
+
+      |||
+
+      {{text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: drug-and-dosage-to-text
+    reference: ''
+  2bed0f04-8249-4248-86ea-e3a1971b2e1b: !Template
+    answer_choices: null
+    id: 2bed0f04-8249-4248-86ea-e3a1971b2e1b
+    jinja: 'Read the below text and answer the two following questions.
+
+
+      Text: {{text}}
+
+
+
+      Question 1: What is the drug in the above text?
+
+
+      Question 2: What is the dosage of it?
+
+      |||
+
+      The drug is {{drug}} and its dosage is {{dosage}}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: two-questions
+    reference: ''
+  ca175bed-d046-40e7-9dbb-1e50fde7e603: !Template
+    answer_choices: null
+    id: ca175bed-d046-40e7-9dbb-1e50fde7e603
+    jinja: 'What is a possible dosage of {{drug}}?
+
+      |||
+
+      {{dosage}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: drug2dosage
+    reference: ''
+  ce5208ac-6b4c-4a35-8738-e20232df1917: !Template
+    answer_choices: null
+    id: ce5208ac-6b4c-4a35-8738-e20232df1917
+    jinja: "Read the below text and answer the question.\n\nText: {{text}}\n\nQuestion:\
+      \ What are the drug and its dosage of the above text, respectively? \n|||\n\
+      {{drug}} and {{dosage}}, respectively."
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: baseline
+    reference: ''
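Both ade_corpus_v2 files rely on the custom "!Template" and "!TemplateMetadata" YAML tags. The real classes live in promptsource/templates.py; a hedged stand-in using PyYAML's YAMLObject is enough to deserialize the files for inspection:

```python
import yaml

class Template(yaml.YAMLObject):
    yaml_tag = "!Template"
    yaml_loader = yaml.SafeLoader  # register the tag with safe_load

class TemplateMetadata(yaml.YAMLObject):
    yaml_tag = "!TemplateMetadata"
    yaml_loader = yaml.SafeLoader

path = ("promptsource/templates/ade_corpus_v2/"
        "Ade_corpus_v2_drug_dosage_relation/templates.yaml")
with open(path) as f:
    doc = yaml.safe_load(f)

for template_id, template in doc["templates"].items():
    print(template_id, template.name)  # e.g. 1de6d411-... dosage2drug
```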
diff --git a/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml b/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..90b95617e31d7cbcc25a467393c479ed33e72d42
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/adversarialQA/templates.yaml
@@ -0,0 +1,110 @@
+dataset: adversarial_qa
+subset: adversarialQA
+templates:
+  00755780-f3c0-44b4-b159-8f3873cdb16c: !Template
+    answer_choices: null
+    id: 00755780-f3c0-44b4-b159-8f3873cdb16c
+    jinja: 'I want to test the ability of students to read a passage and answer questions
+      about it. Could you please come up with a good question for the passage "{{context}}"?
+      |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: 'Input: Context, Output: Question (generate a question)'
+  3b2459cc-6600-443c-abf8-8f60c34cd998: !Template
+    answer_choices: null
+    id: 3b2459cc-6600-443c-abf8-8f60c34cd998
+    jinja: '{% if metadata.split != "test" %}
+
+      I know that the answer to the question "{{question}}" is in "{{context}}". Can
+      you tell me what it is? |||
+
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: tell_what_it_is
+    reference: 'Input: QC, Output: A (rephrase)'
+  5bdb1815-5c6f-49a3-ad1d-367344420701: !Template
+    answer_choices: null
+    id: 5bdb1815-5c6f-49a3-ad1d-367344420701
+    jinja: '{% if metadata.split != "test" %}
+
+      Question: "{{question}}"
+
+
+      Context: "{{context}}"
+
+
+      Answer:
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question_context_answer
+    reference: 'Input: QC, Output: Answer (short form)'
+  a0872cde-2f19-4ae6-919a-868da47bfbcb: !Template
+    answer_choices: null
+    id: a0872cde-2f19-4ae6-919a-868da47bfbcb
+    jinja: '{% if metadata.split != "test" %}
+
+      Extract the answer to the question from the following context.
+
+      Question: {{question}}
+
+      Context: {{context}}|||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: based_on
+    reference: ''
+  a64d5a15-68e2-4d1c-b30a-ca8250c860f9: !Template
+    answer_choices: null
+    id: a64d5a15-68e2-4d1c-b30a-ca8250c860f9
+    jinja: '{% if metadata.split != "test" %}
+
+      Given the following passage
+
+
+      "{{context}}",
+
+
+      answer the following question. Note that the answer is present within the text.
+
+
+      Question: {{question}} |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: answer_the_following_q
+    reference: 'Input: QC, Output: Answer'
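Two conventions in this file deserve a note: the {% if metadata.split != "test" %} guard yields an empty render on the test split (whose gold answers are hidden), and the "choice" filter samples one answer from answers.text. A sketch of both, with the filter registration assumed rather than quoted from the app:

```python
import random
from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice  # supports {{answers.text | choice}}

tpl = env.from_string(
    '{% if metadata.split != "test" %}'
    "Question: {{question}}\nContext: {{context}}|||{{answers.text | choice}}"
    "{% endif %}"
)
hidden = tpl.render(metadata={"split": "test"}, question="q?",
                    context="c", answers={"text": ["a"]})
assert hidden == ""  # test-split examples produce no prompt/target pair
```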
diff --git a/promptsource/templates/adversarial_qa/dbert/templates.yaml b/promptsource/templates/adversarial_qa/dbert/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b839e6c65e6f58a400de85dfa3dcbb54b1fdb73
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/dbert/templates.yaml
@@ -0,0 +1,110 @@
+dataset: adversarial_qa
+subset: dbert
+templates:
+  00755780-f3c0-44b4-b159-8f3873cdb16a: !Template
+    answer_choices: null
+    id: 00755780-f3c0-44b4-b159-8f3873cdb16a
+    jinja: 'I want to test the ability of students to read a passage and answer questions
+      about it. Could you please come up with a good question for the passage "{{context}}"?
+      |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: 'Input: Context, Output: Question (generate a question)'
+  3b2459cc-6600-443c-abf8-8f60c34cd99a: !Template
+    answer_choices: null
+    id: 3b2459cc-6600-443c-abf8-8f60c34cd99a
+    jinja: '{% if metadata.split != "test" %}
+
+      I know that the answer to the question "{{question}}" is in "{{context}}". Can
+      you tell me what it is? |||
+
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: tell_what_it_is
+    reference: 'Input: QC, Output: A (rephrase)'
+  5bdb1815-5c6f-49a3-ad1d-36734442070a: !Template
+    answer_choices: null
+    id: 5bdb1815-5c6f-49a3-ad1d-36734442070a
+    jinja: '{% if metadata.split != "test" %}
+
+      Question: "{{question}}"
+
+
+      Context: "{{context}}"
+
+
+      Answer:
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question_context_answer
+    reference: 'Input: QC, Output: Answer (short form)'
+  a0872cde-2f19-4ae6-919a-868da47bfbca: !Template
+    answer_choices: null
+    id: a0872cde-2f19-4ae6-919a-868da47bfbca
+    jinja: '{% if metadata.split != "test" %}
+
+      Extract the answer to the question from the following context.
+
+      Question: {{question}}
+
+      Context: {{context}}|||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: based_on
+    reference: ''
+  a64d5a15-68e2-4d1c-b30a-ca8250c860fa: !Template
+    answer_choices: null
+    id: a64d5a15-68e2-4d1c-b30a-ca8250c860fa
+    jinja: '{% if metadata.split != "test" %}
+
+      Given the following passage
+
+
+      "{{context}}",
+
+
+      answer the following question. Note that the answer is present within the text.
+
+
+      Question: {{question}} |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: answer_the_following_q
+    reference: 'Input: QC, Output: Answer'
diff --git a/promptsource/templates/adversarial_qa/dbidaf/templates.yaml b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b38a699034c47ccd3554510e07d52a22652e96b9
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/dbidaf/templates.yaml
@@ -0,0 +1,110 @@
+dataset: adversarial_qa
+subset: dbidaf
+templates:
+  41f28b31-d0fc-4f20-a0a2-ff21813e298e: !Template
+    answer_choices: null
+    id: 41f28b31-d0fc-4f20-a0a2-ff21813e298e
+    jinja: '{% if metadata.split != "test" %}
+
+      Extract the answer to the question from the following context.
+
+      Question: {{question}}
+
+      Context: {{context}}|||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: based_on
+    reference: ''
+  a64d5a15-68e2-4d1c-b30a-ca8250c860d9: !Template
+    answer_choices: null
+    id: a64d5a15-68e2-4d1c-b30a-ca8250c860d9
+    jinja: '{% if metadata.split != "test" %}
+
+      Given the following passage
+
+
+      "{{context}}",
+
+
+      answer the following question. Note that the answer is present within the text.
+
+
+      Question: {{question}} |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: answer_the_following_q
+    reference: 'Input: QC, Output: Answer'
+  c7a80603-d610-4999-98a7-815b2f84592d: !Template
+    answer_choices: null
+    id: c7a80603-d610-4999-98a7-815b2f84592d
+    jinja: 'I want to test the ability of students to read a passage and answer questions
+      about it. Could you please come up with a good question for the passage "{{context}}"?
+      |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: 'Input: Context, Output: Question (generate a question)'
+  ce9bc00a-567b-4c4e-aad7-df6f5d5d57bb: !Template
+    answer_choices: null
+    id: ce9bc00a-567b-4c4e-aad7-df6f5d5d57bb
+    jinja: '{% if metadata.split != "test" %}
+
+      I know that the answer to the question "{{question}}" is in "{{context}}". Can
+      you tell me what it is? |||
+
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: tell_what_it_is
+    reference: 'Input: QC, Output: A (rephrase)'
+  fa185424-6ebe-49b8-b4ed-7632ca33c361: !Template
+    answer_choices: null
+    id: fa185424-6ebe-49b8-b4ed-7632ca33c361
+    jinja: '{% if metadata.split != "test" %}
+
+      Question: "{{question}}"
+
+
+      Context: "{{context}}"
+
+
+      Answer:
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question_context_answer
+    reference: 'Input: QC, Output: Answer (short form)'
diff --git a/promptsource/templates/adversarial_qa/droberta/templates.yaml b/promptsource/templates/adversarial_qa/droberta/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1ab9886ba25c9c656fff6d0d36aa531391e2a640
--- /dev/null
+++ b/promptsource/templates/adversarial_qa/droberta/templates.yaml
@@ -0,0 +1,110 @@
+dataset: adversarial_qa
+subset: droberta
+templates:
+  00755780-f3c0-44b4-b159-8f3873cdb163: !Template
+    answer_choices: null
+    id: 00755780-f3c0-44b4-b159-8f3873cdb163
+    jinja: 'I want to test the ability of students to read a passage and answer questions
+      about it. Could you please come up with a good question for the passage "{{context}}"?
+      |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: 'Input: Context, Output: Question (generate a question)'
+  3b2459cc-6600-443c-abf8-8f60c34cd993: !Template
+    answer_choices: null
+    id: 3b2459cc-6600-443c-abf8-8f60c34cd993
+    jinja: '{% if metadata.split != "test" %}
+
+      I know that the answer to the question "{{question}}" is in "{{context}}". Can
+      you tell me what it is? |||
+
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: tell_what_it_is
+    reference: 'Input: QC, Output: A (rephrase)'
+  5bdb1815-5c6f-49a3-ad1d-367344420703: !Template
+    answer_choices: null
+    id: 5bdb1815-5c6f-49a3-ad1d-367344420703
+    jinja: '{% if metadata.split != "test" %}
+
+      Question: "{{question}}"
+
+
+      Context: "{{context}}"
+
+
+      Answer:
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question_context_answer
+    reference: 'Input: QC, Output: Answer (short form)'
+  a0872cde-2f19-4ae6-919a-868da47bfbc3: !Template
+    answer_choices: null
+    id: a0872cde-2f19-4ae6-919a-868da47bfbc3
+    jinja: '{% if metadata.split != "test" %}
+
+      Extract the answer to the question from the following context.
+
+      Question: {{question}}
+
+      Context: {{context}}|||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: based_on
+    reference: ''
+  a64d5a15-68e2-4d1c-b30a-ca8250c860f3: !Template
+    answer_choices: null
+    id: a64d5a15-68e2-4d1c-b30a-ca8250c860f3
+    jinja: '{% if metadata.split != "test" %}
+
+      Given the following passage
+
+
+      "{{context}}",
+
+
+      answer the following question. Note that the answer is present within the text.
+
+
+      Question: {{question}} |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: answer_the_following_q
+    reference: 'Input: QC, Output: Answer'
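A side-by-side read of the four adversarial_qa files (adversarialQA, dbert, dbidaf, droberta) shows the same five prompt bodies repeated under different UUID keys, several differing from their siblings only in the final hex digit. A quick check of that observation, reusing the YAMLObject stand-ins sketched earlier (paths assume the repo root as the working directory):

```python
import glob
import yaml

class Template(yaml.YAMLObject):
    yaml_tag = "!Template"
    yaml_loader = yaml.SafeLoader

class TemplateMetadata(yaml.YAMLObject):
    yaml_tag = "!TemplateMetadata"
    yaml_loader = yaml.SafeLoader

bodies = {}
for path in sorted(glob.glob("promptsource/templates/adversarial_qa/*/templates.yaml")):
    with open(path) as f:
        doc = yaml.safe_load(f)
    bodies[doc["subset"]] = sorted(t.jinja for t in doc["templates"].values())

# As of this diff, all four subsets ship byte-identical prompt bodies.
assert len(set(map(tuple, bodies.values()))) == 1
```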
diff --git a/promptsource/templates/aeslc/templates.yaml b/promptsource/templates/aeslc/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..16c34b98e44dde3eba6f1fbd06258b13dffe3d7b
--- /dev/null
+++ b/promptsource/templates/aeslc/templates.yaml
@@ -0,0 +1,131 @@
+dataset: aeslc
+templates:
+  0bef38b8-6d0b-440b-8a3d-db034aaf5a15: !Template
+    answer_choices: null
+    id: 0bef38b8-6d0b-440b-8a3d-db034aaf5a15
+    jinja: '{{ email_body }}
+
+
+      What is this email about? |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: context_question_2
+    reference: ''
+  11de8b2c-8016-4b98-b5f2-c1a7e5c0e433: !Template
+    answer_choices: null
+    id: 11de8b2c-8016-4b98-b5f2-c1a7e5c0e433
+    jinja: 'What is the subject of this email:
+
+
+      {{ email_body }} |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: question_context_1
+    reference: ''
+  12616e45-1d61-4924-8ce4-fe3efd061e7a: !Template
+    answer_choices: null
+    id: 12616e45-1d61-4924-8ce4-fe3efd061e7a
+    jinja: 'The text below is the content of an email. What is the topic of this email?
+
+
+      {{ email_body }} |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: question_context_4
+    reference: ''
+  25179c66-5638-4de5-bdce-d6dccec64c65: !Template
+    answer_choices: null
+    id: 25179c66-5638-4de5-bdce-d6dccec64c65
+    jinja: 'Choose a subject line for the email body below:
+
+
+      {{ email_body }} |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: question_context_3
+    reference: ''
+  8917d7f0-5f72-418f-a2d9-98d4a8da13b0: !Template
+    answer_choices: null
+    id: 8917d7f0-5f72-418f-a2d9-98d4a8da13b0
+    jinja: 'What is this email about:
+
+
+      {{ email_body }} |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: question_context_2
+    reference: ''
+  d1c5da3f-f1e4-4891-abcb-79463b30a616: !Template
+    answer_choices: null
+    id: d1c5da3f-f1e4-4891-abcb-79463b30a616
+    jinja: '{{ email_body }}
+
+
+      What is the subject of this email? |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: context_question_1
+    reference: ''
+  d9dd8e72-acb4-4aad-aeb7-a877bacbb402: !Template
+    answer_choices: null
+    id: d9dd8e72-acb4-4aad-aeb7-a877bacbb402
+    jinja: '{{ email_body }}
+
+
+      Choose a subject line for the email body above. |||
+
+
+      {{ subject_line }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: context_question_3
+    reference: ''
+  dca29ebb-2372-423f-b93c-21d99eddf455: !Template
+    answer_choices: null
+    id: dca29ebb-2372-423f-b93c-21d99eddf455
+    jinja: '{{ email_body }}
+
+
+      The above text is the content of an email. What is the topic of this email?
+      |||
+
+
+      {{ subject_line }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: context_question_4
+    reference: ''
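The aeslc prompts are pure generation tasks: answer_choices is null and the target ({{subject_line}}) is open-ended, whereas classification files elsewhere in this diff carry a "|||"-separated answer_choices string. A tiny helper making that convention explicit (the convention is inferred from these files, not quoted from the library):

```python
from typing import Optional

def is_generation_template(answer_choices: Optional[str]) -> bool:
    """Null answer_choices marks open-ended generation; a '|||' string
    marks a closed label set."""
    return answer_choices is None

assert is_generation_template(None)              # aeslc subject-line prompts
assert not is_generation_template("No ||| Yes")  # e.g. yes/no classification
```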
diff --git a/promptsource/templates/ag_news/templates.yaml b/promptsource/templates/ag_news/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ef8a9043227e8a82b9af94c57df5bbd7e72a780
--- /dev/null
+++ b/promptsource/templates/ag_news/templates.yaml
@@ -0,0 +1,94 @@
+dataset: ag_news
+templates:
+  24e44a81-a18a-42dd-a71c-5b31b2d2cb39: !Template
+    answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+    id: 24e44a81-a18a-42dd-a71c-5b31b2d2cb39
+    jinja: "What label best describes this news article?\n{{text}} ||| \n{{answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: classify_question_first
+    reference: ''
+  8fdc1056-1029-41a1-9c67-354fc2b8ceaf: !Template
+    answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+    id: 8fdc1056-1029-41a1-9c67-354fc2b8ceaf
+    jinja: "Is this a piece of news regarding {{\"world politics, sports, business,\
+      \ or science and technology\"}}?\n{{text}} \n||| \n{{answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: classify_with_choices_question_first
+    reference: ''
+  918267e0-af68-4117-892d-2dbe66a58ce9: !Template
+    answer_choices: Politician ||| Athlete ||| Business executive ||| Scientist
+    id: 918267e0-af68-4117-892d-2dbe66a58ce9
+    jinja: 'Would you recommend the following article to a {{"politician"}}, an {{"athlete"}},
+      a {{"business executive"}}, or a {{"scientist"}}?
+
+
+      {{ text }}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: recommend
+    reference: ''
+  9345df33-4f23-4944-a33c-eef94e626862: !Template
+    answer_choices: World News ||| Sports ||| Business ||| Science and Technology
+    id: 9345df33-4f23-4944-a33c-eef94e626862
+    jinja: "{{text}} \n\nWhich of the following sections of a newspaper would this\
+      \ article likely appear in? {{\"World News\"}}, {{\"Sports\"}}, {{\"Business\"\
+      }}, or {{\"Science and Technology\"}}? ||| \n{{answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_section_choices
+    reference: ''
+  98534347-fff7-4c39-a795-4e69a44791f7: !Template
+    answer_choices: World News ||| Sports ||| Business ||| Science and Technology
+    id: 98534347-fff7-4c39-a795-4e69a44791f7
+    jinja: "{{text}} \n\nWhich section of a newspaper would this article likely appear\
+      \ in? ||| \n{{answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_section
+    reference: ''
+  b401b0ee-6ffe-4a91-8e15-77ee073cd858: !Template
+    answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+    id: b401b0ee-6ffe-4a91-8e15-77ee073cd858
+    jinja: "{{text}} \nIs this a piece of news regarding {{\"world politics, sports,\
+      \ business, or science and technology\"}}? ||| \n{{answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: classify_with_choices
+    reference: ''
+  cb355f33-7e8c-4455-a72b-48d315bd4f60: !Template
+    answer_choices: World politics ||| Sports ||| Business ||| Science and technology
+    id: cb355f33-7e8c-4455-a72b-48d315bd4f60
+    jinja: "{{text}} \nWhat label best describes this news article? ||| \n{{answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: classify
+    reference: ''
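Every ag_news entry indexes its answer_choices string with the integer class label via {{answer_choices[label]}}, which implies the string is split on "|||" and stripped before rendering. A sketch of that evident convention (an inference from the files, not the library's code):

```python
from typing import List

def parse_answer_choices(raw: str) -> List[str]:
    """Split a '|||'-separated answer_choices string into a clean list."""
    return [choice.strip() for choice in raw.split("|||")]

choices = parse_answer_choices(
    "World politics ||| Sports ||| Business ||| Science and technology"
)
assert choices[1] == "Sports"  # ag_news label 1 is the Sports class
```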
diff --git a/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml b/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..345d8904d3d7e4397054e715ad362269f3e59013
--- /dev/null
+++ b/promptsource/templates/ai2_arc/ARC-Challenge/templates.yaml
@@ -0,0 +1,130 @@
+dataset: ai2_arc
+subset: ARC-Challenge
+templates:
+  32f7eb4d-dd38-4503-b67d-a8a96ab40449: !Template
+    answer_choices: null
+    id: 32f7eb4d-dd38-4503-b67d-a8a96ab40449
+    jinja: 'Pick and copy all the incorrect options for the following question:
+
+
+      {{question}}
+
+
+      Options:
+
+      - {{choices["text"] | join("\n- ")}}|||
+
+      {% for i in range(choices["label"]|length) %}
+
+      {% if i != choices["label"].index(answerKey) %}
+
+      - {{choices["text"][i]}}
+
+      {% endif %}
+
+      {% endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: false
+    name: pick_false_options
+    reference: ''
+  540ebc31-2ea6-4feb-a6fd-67b6e71cf20a: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 540ebc31-2ea6-4feb-a6fd-67b6e71cf20a
+    jinja: "Here's a problem to solve: {{question}}\n\nAmong the 4 following options,\
+      \ which is the correct answer?\n{% for letter, t in zip(answer_choices, choices.text)\
+      \ %}\n- {{letter}}: {{t}}\n {% endfor %}|||{{answerKey}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: heres_a_problem
+    reference: ''
+  5ec2b8ca-e4c0-444e-b097-89ccce811550: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 5ec2b8ca-e4c0-444e-b097-89ccce811550
+    jinja: '{{question}}
+
+
+      Options:
+
+      - {{answer_choices | join("\n- ")}}|||
+
+      {{answer_choices[choices["label"].index(answerKey)]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_options
+    reference: ''
+  5ff84886-9d5f-40d1-80d7-2a39b7c16ec6: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 5ff84886-9d5f-40d1-80d7-2a39b7c16ec6
+    jinja: 'I am hesitating between 4 options to answer the following question, which
+      option should I choose?
+
+      Question: {{question}}
+
+      Possibilities:
+
+      - {{answer_choices | join("\n- ")}}|||
+
+      {{answer_choices[choices["label"].index(answerKey)]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: i_am_hesitating
+    reference: ''
+  ced2b33b-b590-4522-b041-51d7dd669561: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: ced2b33b-b590-4522-b041-51d7dd669561
+    jinja: 'I gave my students this multiple choice question: {{question}}
+
+
+      Only one answer is correct among these 4 choices:
+
+      - {{answer_choices | join("\n- ")}}
+
+
+      Could you tell me which one is correct?|||
+
+      {{answer_choices[choices["label"].index(answerKey)]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: multiple_choice
+    reference: ''
+  e371fc1a-8edb-477b-b345-9d73e97ffade: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: e371fc1a-8edb-477b-b345-9d73e97ffade
+    jinja: 'Pick the most correct option to answer the following question.
+
+
+      {{question}}
+
+
+      Options:
+
+      {% for letter, t in zip(answer_choices, choices.text) %}
+
+      - {{letter}}: {{t}}
+
+      {% endfor %} |||
+
+      {{answerKey}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_the_most_correct_option
+    reference: ''
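The heres_a_problem and pick_the_most_correct_option templates call zip(...) inside jinja, which is not a jinja builtin, so the rendering environment must expose Python's zip. A minimal sketch of that setup (assumed, not the app's verbatim code):

```python
from jinja2 import Environment

env = Environment()
env.globals["zip"] = zip  # make zip callable from inside templates

tpl = env.from_string(
    "{% for letter, t in zip(answer_choices, choices.text) %}"
    "- {{letter}}: {{t}}\n{% endfor %}"
)
print(tpl.render(answer_choices=["A", "B"], choices={"text": ["cat", "dog"]}))
# - A: cat
# - B: dog
```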
diff --git a/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml b/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..92c00c44893d0de5d479ad5d38edfbd912efcb31
--- /dev/null
+++ b/promptsource/templates/ai2_arc/ARC-Easy/templates.yaml
@@ -0,0 +1,130 @@
+dataset: ai2_arc
+subset: ARC-Easy
+templates:
+  033498ca-3d9a-47e3-b631-d881ab53b5ad: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 033498ca-3d9a-47e3-b631-d881ab53b5ad
+    jinja: 'Pick the most correct option to answer the following question.
+
+
+      {{question}}
+
+
+      Options:
+
+      {% for letter, t in zip(answer_choices, choices.text) %}
+
+      - {{letter}}: {{t}}
+
+      {% endfor %} |||
+
+      {{answerKey}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_the_most_correct_option
+    reference: ''
+  252aa566-9482-4e81-aad9-664a9bebd8e8: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 252aa566-9482-4e81-aad9-664a9bebd8e8
+    jinja: '{{question}}
+
+
+      Options:
+
+      - {{answer_choices | join("\n- ")}}|||
+
+      {{answer_choices[choices["label"].index(answerKey)]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_options
+    reference: ''
+  4fb13ac1-f770-45ea-b5d5-91ac50b0d609: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 4fb13ac1-f770-45ea-b5d5-91ac50b0d609
+    jinja: 'I am hesitating between 4 options to answer the following question, which
+      option should I choose?
+
+      Question: {{question}}
+
+      Possibilities:
+
+      - {{answer_choices | join("\n- ")}}|||
+
+      {{answer_choices[choices["label"].index(answerKey)]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: i_am_hesitating
+    reference: ''
+  8c689423-880d-402b-8c7d-a1a98c7589e8: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 8c689423-880d-402b-8c7d-a1a98c7589e8
+    jinja: 'I gave my students this multiple choice question: {{question}}
+
+
+      Only one answer is correct among these 4 choices:
+
+      - {{answer_choices | join("\n- ")}}
+
+
+      Could you tell me which one is correct?|||
+
+      {{answer_choices[choices["label"].index(answerKey)]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: multiple_choice
+    reference: ''
+  c988ee30-a523-457b-af21-87353349b543: !Template
+    answer_choices: null
+    id: c988ee30-a523-457b-af21-87353349b543
+    jinja: 'Pick and copy all the incorrect options for the following question:
+
+
+      {{question}}
+
+
+      Options:
+
+      - {{choices["text"] | join("\n- ")}}|||
+
+      {% for i in range(choices["label"]|length) %}
+
+      {% if i != choices["label"].index(answerKey) %}
+
+      - {{choices["text"][i]}}
+
+      {% endif %}
+
+      {% endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: false
+    name: pick_false_options
+    reference: ''
+  d90da519-0e2c-4f9b-a546-7cba82824eb2: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: d90da519-0e2c-4f9b-a546-7cba82824eb2
+    jinja: "Here's a problem to solve: {{question}}\n\nAmong the 4 following options,\
+      \ which is the correct answer?\n{% for letter, t in zip(answer_choices, choices.text)\
+      \ %}\n- {{letter}}: {{t}}\n {% endfor %}|||{{answerKey}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: heres_a_problem
+    reference: ''
diff --git a/promptsource/templates/amazon_polarity/templates.yaml b/promptsource/templates/amazon_polarity/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9cd7279784fdb272ca7a19769af3cf5e8d2ad86
--- /dev/null
+++ b/promptsource/templates/amazon_polarity/templates.yaml
@@ -0,0 +1,174 @@
+dataset: amazon_polarity
+templates:
+  1e90a24a-1182-43dd-9445-22f2e56e5761: !Template
+    answer_choices: Negative ||| Positive
+    id: 1e90a24a-1182-43dd-9445-22f2e56e5761
+    jinja: 'Title: {{title}}
+
+      Review: {{content}}
+
+      Is the review positive or negative? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_review
+    reference: ''
+  3a48f287-6a4b-4df0-ab2d-2eaf6cb8e53d: !Template
+    answer_choices: No ||| Yes
+    id: 3a48f287-6a4b-4df0-ab2d-2eaf6cb8e53d
+    jinja: 'Based on this review, would the user recommend this product?
+
+      ===
+
+      Review: {{content}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: User_recommend_this_product
+    reference: 'Reformulation equivalent to sentiment analysis: would the user recommend
+      this product?'
+  592caf8f-f8ff-426a-a61b-b7e95ed510b6: !Template
+    answer_choices: No ||| Yes
+    id: 592caf8f-f8ff-426a-a61b-b7e95ed510b6
+    jinja: 'Is this product review positive?
+
+      Title: {{title}}
+
+      Review: {{content}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_product_review_positive
+    reference: ''
+  745b9c05-10df-4a7e-81ad-1b88cefcb166: !Template
+    answer_choices: Yes ||| No
+    id: 745b9c05-10df-4a7e-81ad-1b88cefcb166
+    jinja: 'Title: {{title}}
+
+      Review: {{content}}
+
+      Is this product review negative?|||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_review_negative
+    reference: ''
+  8abb5377-5dd3-4402-92a5-0d81adb6a325: !Template
+    answer_choices: Negative ||| Positive
+    id: 8abb5377-5dd3-4402-92a5-0d81adb6a325
+    jinja: 'Title: {{title}}
+
+      Review: {{content}}
+
+      Does this product review convey a negative or positive sentiment?|||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: convey_negative_or_positive_sentiment
+    reference: ''
+  9df70cdf-f8ed-4e79-8e2f-b4668058d637: !Template
+    answer_choices: Negative ||| Positive
+    id: 9df70cdf-f8ed-4e79-8e2f-b4668058d637
+    jinja: 'Is there a negative or positive tone to this product review?
+
+      ===
+
+      Title: {{title}}
+
+      Review: {{content}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: negative_or_positive_tone
+    reference: ''
+  b13369e8-0500-4e93-90d4-8e6814bfb97b: !Template
+    answer_choices: dissatisfied ||| satisfied
+    id: b13369e8-0500-4e93-90d4-8e6814bfb97b
+    jinja: 'Here is a review left by a customer on a product. Would you say the
+      customer was {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+      Title: {{title}}
+
+      Review: {{content}}
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: user_satisfied
+    reference: ''
+  b13369e8-0500-4e93-90d4-8e6814bfb98b: !Template
+    answer_choices: decrease ||| increase
+    id: b13369e8-0500-4e93-90d4-8e6814bfb98b
+    jinja: 'You are considering whether to buy a product. You look at the reviews.
+      Would the following review {{answer_choices[0]}} or {{answer_choices[1]}} the
+      chances of you buying the product?
+
+      Review title: {{title}}
+
+      Product review: {{content}}
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: would_you_buy
+    reference: ''
+  b13369e8-0500-4e93-90d4-8e6814bfb99b: !Template
+    answer_choices: unflattering ||| flattering
+    id: b13369e8-0500-4e93-90d4-8e6814bfb99b
+    jinja: 'Title: {{title}}
+
+      Product review: {{content}}
+
+      Would you say this review depicts the product in a {{answer_choices[1]}} or
+      {{answer_choices[0]}} light?
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: flattering_or_not
+    reference: ''
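Several amazon_polarity prompts quote their own label set, e.g. {{answer_choices[1]}} or {{answer_choices[0]}} inside the question, so the parsed choice list must be injected into the rendering context alongside the example fields. Assumed wiring, consistent with the flattering_or_not entry above:

```python
from jinja2 import Environment

answer_choices = [c.strip() for c in "unflattering ||| flattering".split("|||")]
tpl = Environment().from_string(
    "Would you say this review depicts the product in a "
    "{{answer_choices[1]}} or {{answer_choices[0]}} light?|||"
    "{{answer_choices[label]}}"
)
print(tpl.render(answer_choices=answer_choices, label=1))
# ... in a flattering or unflattering light?|||flattering
```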
diff --git a/promptsource/templates/amazon_reviews_multi/en/templates.yaml b/promptsource/templates/amazon_reviews_multi/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39aba462fcd44dac8a042b5b0c3abf0eb1774600
--- /dev/null
+++ b/promptsource/templates/amazon_reviews_multi/en/templates.yaml
@@ -0,0 +1,85 @@
+dataset: amazon_reviews_multi
+subset: en
+templates:
+  073dfd34-5aef-461a-81d9-bdb8e00f12c9: !Template
+    answer_choices: null
+    id: 073dfd34-5aef-461a-81d9-bdb8e00f12c9
+    jinja: 'Write a review title for the review below:
+
+      ===
+
+      {{review_body}} |||
+
+      {{review_title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_2
+    reference: Review Title based on Review body
+  0f5b005b-c6bc-4fe0-bde4-0917cdba39e8: !Template
+    answer_choices: null
+    id: 0f5b005b-c6bc-4fe0-bde4-0917cdba39e8
+    jinja: 'Rate the product by the number of stars based on the review title below:
+      (1 being the lowest and 5 the highest)
+
+      ===
+
+      {{review_title}} |||
+
+      {{stars}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_5
+    reference: Rating based on review title
+  199ad6de-5bcc-421e-90e2-4b6edada6a01: !Template
+    answer_choices: null
+    id: 199ad6de-5bcc-421e-90e2-4b6edada6a01
+    jinja: 'Rate the product by the number of stars based on the review body below:
+      (1 being the lowest and 5 the highest)
+
+      ===
+
+      {{review_body}} |||
+
+      {{stars}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_4
+    reference: Rating based on review body
+  7ecaf718-c85d-47f4-83cb-f14c58f2911f: !Template
+    answer_choices: null
+    id: 7ecaf718-c85d-47f4-83cb-f14c58f2911f
+    jinja: 'Guess the product category for the review below:
+
+      ===
+
+      {{review_body}} |||
+
+      {{product_category}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_1
+    reference: Product category based on review body
+  c4717e75-4d3e-4b79-9737-167155f51513: !Template
+    answer_choices: null
+    id: c4717e75-4d3e-4b79-9737-167155f51513
+    jinja: 'Guess the product category from the review title below:
+
+      ===
+
+      {{review_title}} |||
+
+      {{product_category}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_3
+    reference: Product category from review title
diff --git a/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml b/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b076b864482e4dbcb34dc92e946d21cfb5d85ea
--- /dev/null
+++ b/promptsource/templates/amazon_us_reviews/Wireless_v1_00/templates.yaml
@@ -0,0 +1,69 @@
+dataset: amazon_us_reviews
+subset: Wireless_v1_00
+templates:
+  5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e: !Template
+    answer_choices: null
+    id: 5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e
+    jinja: 'Write a review headline summarizing the review below:        ===        {{review_body}}
+      |||        {{review_headline}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_6
+    reference: Generate review headline based on review body
+  957e3322-6907-4e67-bfbe-6ed8862f352c: !Template
+    answer_choices: null
+    id: 957e3322-6907-4e67-bfbe-6ed8862f352c
+    jinja: 'Guess the product category for the review below:        ===        {{review_body}}
+      |||        {{product_category}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_2
+    reference: Predict the product category based on review
+  9588a967-d698-4a33-9b96-a5254df9d260: !Template
+    answer_choices: null
+    id: 9588a967-d698-4a33-9b96-a5254df9d260
+    jinja: Generate a {{star_rating}}-star review (1 being lowest and 5 being highest)
+      about this product in {{product_category}} category.        |||        {{review_body}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_1
+    reference: Generate review based on rating and category
+  9a8b953d-2c68-4046-a7b7-8fd5f7469d10: !Template
+    answer_choices: null
+    id: 9a8b953d-2c68-4046-a7b7-8fd5f7469d10
+    jinja: 'How would you rate this review from 1 to 5 (1 being lowest and 5 being
+      highest): {{review_headline}}?        |||        {{star_rating}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_5
+    reference: 'Given the review headline, return a categorical rating. '
+  e40e4a53-ca5d-4fc8-a7c3-be9adfe0dbec: !Template
+    answer_choices: null
+    id: e40e4a53-ca5d-4fc8-a7c3-be9adfe0dbec
+    jinja: Generate a {{star_rating}}-star review headline (1 being lowest and 5 being
+      highest) about this product.        |||        {{review_headline}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_3
+    reference: 'Generate review headline based on rating. '
+  e6a1bbde-715d-4dad-9178-e2bcfaf5c646: !Template
+    answer_choices: null
+    id: e6a1bbde-715d-4dad-9178-e2bcfaf5c646
+    jinja: 'How would you rate this review from 1 to 5 (1 being lowest and 5 being
+      highest): {{review_body}}?        |||        {{star_rating}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_4
+    reference: 'Given the review body, return a categorical rating. '
diff --git a/promptsource/templates/ambig_qa/light/templates.yaml b/promptsource/templates/ambig_qa/light/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e384b2d9f040a2985eefe37c5be6505012b9a08
--- /dev/null
+++ b/promptsource/templates/ambig_qa/light/templates.yaml
@@ -0,0 +1,94 @@
+dataset: ambig_qa
+subset: light
+templates:
+  5f79fa25-3804-4e32-9493-a12c1c2ddff0: !Template
+    answer_choices: null
+    id: 5f79fa25-3804-4e32-9493-a12c1c2ddff0
+    jinja: "{# Assignement in if clause breaks test, we need to declare variables\
+      \ in global scope first: https://github.com/pallets/jinja/issues/1314 #}\n{%\
+      \ set selected_question = \"\" %}\n{% set selected_answer = \"\" %}\n{% set\
+      \ random_question_id = -1 %}\n{% if annotations.type[0] == \"multipleQAs\" %}\n\
+      \   {% set random_question_id = range(0, annotations.qaPairs[0].question | length)\
+      \ | choice%}\n   {% set selected_question = annotations.qaPairs[0].question[random_question_id]\
+      \ %}\n   {% set selected_answer = annotations.qaPairs[0].answer[random_question_id]\
+      \ | choice %}\n{% else %}\n    {% set selected_question = question %}\n    {%\
+      \ set selected_answer = annotations.answer | choice %}\n{% endif %}\n\n{{selected_question}}\n\
+      |||\n{{selected_answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ambig_qa_light3
+    reference: Randomly choose an annotated question and answer it using one of its
+      answers.
+  72bf511b-44ce-4b9f-a2d0-5ed6334f0e07: !Template
+    answer_choices: null
+    id: 72bf511b-44ce-4b9f-a2d0-5ed6334f0e07
+    jinja: "{# Assignement in if clause breaks test, we need to declare variables\
+      \ in global scope first: https://github.com/pallets/jinja/issues/1314 #}\n{%\
+      \ set random_question_id = -1 %}\n{% set random_answer_id = -1 %}\n{% set selected_question\
+      \ = \"\" %}\n{% set selected_answer = \"\" %}\n{% if annotations.type[0] ==\
+      \ \"multipleQAs\" %}\n   {% set random_question_id = range(0, annotations.qaPairs[0].question\
+      \ | length) | choice%}\n   {% set random_answer_id = range(0, annotations.qaPairs[0].answer\
+      \ | length) | choice%}\n   {% set selected_question = annotations.qaPairs[0].question[random_question_id]\
+      \ %}\n   {% set selected_answer = annotations.qaPairs[0].answer[random_answer_id]\
+      \ | choice%}\n{% else %}\n   {% set random_question_id = 0 %}\n   {% set random_answer_id\
+      \ = 0 %}\n   {% set selected_question = question %}\n   {% set selected_answer\
+      \ = annotations.answer[0] | choice %}\n{% endif %}\n\nIs \"{{selected_answer}}\"\
+      \ the answer to \"{{selected_question}}\"?\n\n|||\n\n{% if random_answer_id\
+      \ == random_question_id %} Yes {% else %} No {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ambig_qa_light4
+    reference: Classify whether the given answer is correct for the chosen question
+  7655d2aa-70df-42cf-9bfa-80484521f856: !Template
+    answer_choices: null
+    id: 7655d2aa-70df-42cf-9bfa-80484521f856
+    jinja: "{{question}}\n\n|||\n\n{# Assignement in if clause breaks test, we need\
+      \ to declare variables in global scope first: https://github.com/pallets/jinja/issues/1314\
+      \ #}\n{% set random_answer = \"\" %}\n{% set random_answer_form = \"\" %}\n\
+      {% if annotations.type[0] == \"singleAnswer\" %}\n    {% set random_answer_form\
+      \ = annotations.answer[0] | choice %}\n{% else %}\n    {% set random_answer\
+      \ = annotations.qaPairs[0].answer | choice %}\n    {% set random_answer_form\
+      \ = random_answer | choice %}\n{% endif %}\n\n{{random_answer_form}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ambig_qa_light1
+    reference: Given the question, choose the answer directly for singleAnswer examples
+      and pick a random answer for multipleQAs examples.
+  bb089312-23cb-475d-93b5-952781bc6be4: !Template
+    answer_choices: null
+    id: bb089312-23cb-475d-93b5-952781bc6be4
+    jinja: "{# Assignement in if clause breaks test, we need to declare variables\
+      \ in global scope first: https://github.com/pallets/jinja/issues/1314 #}\n{%\
+      \ set selected_question = \"\" %}\n{% set selected_answer = \"\" %}\n{% set\
+      \ random_question_id = -1 %}\n{% if annotations.type[0] == \"multipleQAs\" %}\n\
+      \   {% set random_question_id = range(0, annotations.qaPairs[0].question | length)\
+      \ | choice%}\n   {% set selected_question = annotations.qaPairs[0].question[random_question_id]%}\n\
+      \   {% set selected_answer = annotations.qaPairs[0].answer[random_question_id]\
+      \ | choice%}\n{% else %}\n   {% set selected_question = question %}\n   {% set\
+      \ selected_answer = annotations.answer | choice %}\n{% endif %}\nKnowing that\
+      \ \"{{selected_answer}}\" is the answer, what could have been the question?\n\
+      |||\n{{selected_question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ambig_qa_light5
+    reference: Generate the question from the answer
+  f53d00ea-98a8-45d3-92f6-93a8909aef2a: !Template
+    answer_choices: null
+    id: f53d00ea-98a8-45d3-92f6-93a8909aef2a
+    jinja: "{{question}}\n\n|||\n\n{% if annotations.type[0] == \"singleAnswer\" %}\n\
+      \    {{annotations.answer[0] | choice}}\n{% else %}\n    The question was ambiguous.\
+      \ Did you mean \"{{annotations.qaPairs[0].question |choice}}\"?\n{% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ambig_qa_light2
+    reference: If a question is ambiguous, ask another question, otherwise answer.
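All five ambig_qa templates pre-declare their variables before the {% if %} blocks, citing pallets/jinja#1314. The pattern in isolation: assign in the outer scope first, then overwrite inside the branch. Whether the pre-declaration is strictly required depends on the jinja version; the templates play it safe:

```python
from jinja2 import Environment

tpl = Environment().from_string(
    '{% set selected = "" %}'  # declared in the outer scope first
    '{% if kind == "multi" %}{% set selected = questions[0] %}'
    "{% else %}{% set selected = question %}{% endif %}"
    "{{selected}}"
)
print(tpl.render(kind="multi", questions=["Q1?", "Q2?"], question="Q?"))  # Q1?
```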
diff --git a/promptsource/templates/anli/templates.yaml b/promptsource/templates/anli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..528e7cf3218ca407adccd0931bdabc04c4f3b43b
--- /dev/null
+++ b/promptsource/templates/anli/templates.yaml
@@ -0,0 +1,191 @@
+dataset: anli
+templates:
+  0cc3ae39-3997-4686-8c93-5d51457efa1f: !Template
+    answer_choices: Correct ||| Inconclusive ||| Incorrect
+    id: 0cc3ae39-3997-4686-8c93-5d51457efa1f
+    jinja: '{{premise}} Using only the above description and what you know about the
+      world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: MNLI crowdsource
+    reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+  179eb863-3ece-4e6f-af0f-fcb46d997306: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: 179eb863-3ece-4e6f-af0f-fcb46d997306
+    jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+      no, or maybe? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: should assume
+    reference: Webson & Pavlick 2021
+  5459237b-97de-4340-bf7b-2939c3f7ca19: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: 5459237b-97de-4340-bf7b-2939c3f7ca19
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+      ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does it follow that
+    reference: v0.1
+  620aa3fc-d5eb-46f5-a1ee-4c754527aa97: !Template
+    answer_choices: True ||| Neither ||| False
+    id: 620aa3fc-d5eb-46f5-a1ee-4c754527aa97
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+      are no task-identifying tokens like "anli R1: ".'
+  9b613182-c6ab-4427-9221-3d68f6d62765: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: 9b613182-c6ab-4427-9221-3d68f6d62765
+    jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+      Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  a850110d-f1a3-49b4-949a-d3bfe9f81344: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: a850110d-f1a3-49b4-949a-d3bfe9f81344
+    jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+      or maybe? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: justified in saying
+    reference: Webson & Pavlick 2021
+  bab86d5a-4f9c-40db-b619-a7b7d5cae681: !Template
+    answer_choices: True ||| Inconclusive ||| False
+    id: bab86d5a-4f9c-40db-b619-a7b7d5cae681
+    jinja: 'Take the following as truth: {{premise}}
+
+      Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+      {{"inconclusive"}}? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: take the following as truth
+    reference: Bers et al.
+  bcd90047-3a2b-426b-b065-8a418f1317b8: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: bcd90047-3a2b-426b-b065-8a418f1317b8
+    jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+      Yes, no, or maybe? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: must be true
+    reference: v0.1
+  c4ed37ae-d7d7-4197-a725-ef2152fa3b1f: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: c4ed37ae-d7d7-4197-a725-ef2152fa3b1f
+    jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+      ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: can we infer
+    reference: Webson & Pavlick 2021
+  ca24b93a-6265-462f-b140-e329c03d94fa: !Template
+    answer_choices: Guaranteed ||| Possible ||| Impossible
+    id: ca24b93a-6265-462f-b140-e329c03d94fa
+    jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+      \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {{ answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed/possible/impossible
+    reference: Bers et al.
+  dbc68425-5c42-43ae-9748-70ce8c5a167e: !Template
+    answer_choices: Always ||| Sometimes ||| Never
+    id: dbc68425-5c42-43ae-9748-70ce8c5a167e
+    jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+      {{"sometimes"}}, or {{"never"}} true? ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: always/sometimes/never
+    reference: Bers et al.
+  e5b7fdd7-fdff-4630-889b-3c7a052e5da0: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: e5b7fdd7-fdff-4630-889b-3c7a052e5da0
+    jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+      \ no, or maybe? ||| {{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does this imply
+    reference: v0.1
+  e6f32b9c-7e0b-474a-a0d2-e84d20c22aba: !Template
+    answer_choices: Always ||| Sometimes ||| Never
+    id: e6f32b9c-7e0b-474a-a0d2-e84d20c22aba
+    jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+      \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {{\
+      \ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: consider always/sometimes/never
+    reference: Bers et al.
+  ec249357-e672-4e7d-b8b6-d97ed7d090c5: !Template
+    answer_choices: True ||| Inconclusive ||| False
+    id: ec249357-e672-4e7d-b8b6-d97ed7d090c5
+    jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+      {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: claim true/false/inconclusive
+    reference: Bers et al.
+  ffa0a6f0-7186-4ccb-bb35-8b1affb747a0: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: ffa0a6f0-7186-4ccb-bb35-8b1affb747a0
+    jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+      or maybe? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed true
+    reference: Webson & Pavlick 2021
diff --git a/promptsource/templates/app_reviews/templates.yaml b/promptsource/templates/app_reviews/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..91437804f31b7a7e20b2b76bd0e75f7998fc75a2
--- /dev/null
+++ b/promptsource/templates/app_reviews/templates.yaml
@@ -0,0 +1,68 @@
+dataset: app_reviews
+templates:
+  2da8f134-58db-4f9d-b3b0-8c6b50693ab5: !Template
+    answer_choices: Not at all ||| No ||| Maybe ||| Yes ||| Definitely
+    id: 2da8f134-58db-4f9d-b3b0-8c6b50693ab5
+    jinja: 'Given this review: "{{review}}"
+
+      Would you recommend this app to a friend? {{answer_choices[0]}}, {{answer_choices[1]}},
+      {{answer_choices[2]}}, {{answer_choices[3]}}, or {{answer_choices[4]}}?
+
+      |||
+
+      {{answer_choices[star-1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Spearman Correlation
+      original_task: false
+    name: categorize_rating_using_review
+    reference: Given the review, return a categorical answer.
+  8086b434-a75e-45a4-87fb-4364601e2e05: !Template
+    answer_choices: null
+    id: 8086b434-a75e-45a4-87fb-4364601e2e05
+    jinja: 'Generate a {{star}}-star review (1 being lowest and 5 being highest) about
+      an app with package {{package_name}}.
+
+      |||
+
+      {{review}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate_review
+    reference: Generate a review from the rating.
+  9746ce4b-ac58-4dfb-9783-d77c95cb62cf: !Template
+    answer_choices: "\u2605 ||| \u2605\u2605 ||| \u2605\u2605\u2605 ||| \u2605\u2605\
+      \u2605\u2605 ||| \u2605\u2605\u2605\u2605\u2605"
+    id: 9746ce4b-ac58-4dfb-9783-d77c95cb62cf
+    jinja: "What would be the \u2605-rating of this review (\u2605 being the lowest\
+      \ and \u2605\u2605\u2605\u2605\u2605 being the highest)? \"{{review}}\"\n|||\n\
+      {{answer_choices[star-1]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Spearman Correlation
+      original_task: false
+    name: convert_to_star_rating
+    reference: Given the review, generate a star rating.
+  d34e1413-2699-4701-baa2-05d931d012ba: !Template
+    answer_choices: null
+    id: d34e1413-2699-4701-baa2-05d931d012ba
+    jinja: 'On a scale of 1-5 (with 1 being least favorable and 5 being most favorable),
+      how would you rate this review? "{{review}}"
+
+      |||
+
+      {{star}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Spearman Correlation
+      original_task: false
+    name: convert_to_rating
+    reference: Convert review to rating
diff --git a/promptsource/templates/aqua_rat/raw/templates.yaml b/promptsource/templates/aqua_rat/raw/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9594e36c0d1c32f91a8380ba83dbf0df18aec714
--- /dev/null
+++ b/promptsource/templates/aqua_rat/raw/templates.yaml
@@ -0,0 +1,125 @@
+dataset: aqua_rat
+subset: raw
+templates:
+  13bd5099-33fa-4383-a441-33a7d2e1746f: !Template
+    answer_choices: null
+    id: 13bd5099-33fa-4383-a441-33a7d2e1746f
+    jinja: 'Given the problem:
+
+      {{question}}
+
+
+      and the options:
+
+      {% for i in range(options|length) %}
+
+      {{options[i].replace('')'', '') '')}}
+
+      {% endfor %}
+
+
+      The correct answer is |||
+
+      {{correct}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp_6
+    reference: ''
+  58a6aa2b-ca26-473d-9bf8-385dd1a743cd: !Template
+    answer_choices: null
+    id: 58a6aa2b-ca26-473d-9bf8-385dd1a743cd
+    jinja: 'You will now be given a question and a set of options. Choose the correct
+      option and provide a rationale for your choice.
+
+
+      Question:
+
+      {{question}}
+
+
+      Options:
+
+      {% for i in range(options|length) %}
+
+      {{options[i].replace('')'', '') '')}}
+
+      {% endfor %}
+
+
+      |||
+
+      {{correct}}
+
+
+      {{rationale}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp_4
+    reference: ''
+  5acfaa48-e1b6-44df-8e92-c58b94bff595: !Template
+    answer_choices: null
+    id: 5acfaa48-e1b6-44df-8e92-c58b94bff595
+    jinja: "Answer the given question by providing the correct rationale:\n\n{{question}}\n\
+      {% for i in range(options|length) %}\n   {{options[i].replace(')', ') ')}}\n\
+      {%endfor%}\n|||\n{{rationale}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp_2
+    reference: ''
+  815acaf5-2e59-4f81-8190-ae75dc237cf1: !Template
+    answer_choices: null
+    id: 815acaf5-2e59-4f81-8190-ae75dc237cf1
+    jinja: '{{question}}
+
+
+      The above question was asked in a Math test. Given the following options, can
+      you choose the correct one?
+
+
+      {% for i in range(options|length) %}
+
+      {{options[i].replace('')'', '') '')}}
+
+      {% endfor %}
+
+      |||
+
+      {{correct}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp_3
+    reference: ''
+  c0403841-68b0-4c08-8c3b-a00a81272d05: !Template
+    answer_choices: null
+    id: c0403841-68b0-4c08-8c3b-a00a81272d05
+    jinja: "Solve the following question and choose the correct option.\n\n{{question}}\
+      \ \n{% for i in range(options|length) %}\n{{options[i].replace(')', ') ')}}\n\
+      {%endfor%}\n||| \n{{correct}}\n\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic
+    reference: ''
+  c9352c6c-074b-4beb-8489-c151adeeedcb: !Template
+    answer_choices: null
+    id: c9352c6c-074b-4beb-8489-c151adeeedcb
+    jinja: "Question: \n{{question}}\n\nOptions: \n{% for i in range(options|length)\
+      \ %}\n{{options[i].replace(')', ') ')}}\n{% endfor %}\n\nThis is how I solved\
+      \ the above question:\n|||\n{{rationale}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp_5
+    reference: ''
diff --git a/promptsource/templates/art/templates.yaml b/promptsource/templates/art/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..2afe01193345dba80ec54f42c841871a52f72553
--- /dev/null
+++ b/promptsource/templates/art/templates.yaml
@@ -0,0 +1,218 @@
+dataset: art
+templates:
+  151d0e97-d7d2-47f2-86b4-6777587b16f2: !Template
+    answer_choices: null
+    id: 151d0e97-d7d2-47f2-86b4-6777587b16f2
+    jinja: "We know that:\n\n{{ observation_1  | trim('.?!') }},\n\nand:\n\n{{ observation_2\
+      \ }} \n\nWhat is more likely?\n\nFirst option: \n\n{{ hypothesis_1  | trim('.?!')\
+      \ }}, \n\nor second option:\n\n{{ hypothesis_2  | trim('.?!') }}?\n|||\n{{ [hypothesis_1,\
+      \ hypothesis_2][label-1]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp4
+    reference: ''
+  2c74c78c-1757-4236-8925-594bbff9a621: !Template
+    answer_choices: null
+    id: 2c74c78c-1757-4236-8925-594bbff9a621
+    jinja: 'Which version is more accurate?
+
+
+      The first one:
+
+
+      {{ hypothesis_2  | trim(''.?!'') }},
+
+
+      or the second one:
+
+
+      {{ hypothesis_1  | trim(''.?!'') }}?
+
+
+      Assuming that:
+
+
+      {{ observation_1 }} {{ observation_2 }}
+
+      |||
+
+      {{ [hypothesis_1, hypothesis_2][label-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp5_reversed
+    reference: ''
+  2e360dde-c137-405c-bd8b-9e31c9f2aa8c: !Template
+    answer_choices: No ||| Yes
+    id: 2e360dde-c137-405c-bd8b-9e31c9f2aa8c
+    jinja: "Given that: \n\n{{  observation_1   | trim('.?!') }}, \n\nand: \n\n{{\
+      \  observation_2  | trim('.?!') }}, \n\nis it true that:\n\n{{ hypothesis_2\
+      \  | trim('.?!')}}?\n|||\n{{ answer_choices[label-1] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hyp2_1
+    reference: ''
+  43fd9dac-ce01-4d9c-9a03-ae38d98bb5aa: !Template
+    answer_choices: No ||| Yes
+    id: 43fd9dac-ce01-4d9c-9a03-ae38d98bb5aa
+    jinja: "Does this statement: \n\n{{ hypothesis_2  | trim('.?!') }} \n\nexplain\
+      \ the situation described below?\n\n{{ observation_1 }}\n{{ observation_2 }}\n\
+      |||\n{{ answer_choices[label-1] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hyp2_2
+    reference: ''
+  5015a37a-c66b-4b44-9e92-08a403a7b6aa: !Template
+    answer_choices: null
+    id: 5015a37a-c66b-4b44-9e92-08a403a7b6aa
+    jinja: '{{ observation_1 }} {{ observation_2 }}
+
+
+      Would you rather believe that:
+
+
+      {{ hypothesis_2  | trim(''.?!'') }},
+
+
+      or:
+
+
+      {{ hypothesis_1  | trim(''.?!'') }}?
+
+      |||
+
+      {{ [hypothesis_1, hypothesis_2][label-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp3_reversed
+    reference: ''
+  6dda5a3f-3511-4f9b-9062-a33fe98c477d: !Template
+    answer_choices: Yes ||| No
+    id: 6dda5a3f-3511-4f9b-9062-a33fe98c477d
+    jinja: "Given that: \n\n{{  observation_1  | trim('.?!') }}, \n\nand: \n\n{{ \
+      \ observation_2  | trim('.?!') }}, \n\nis it true that:\n\n{{ hypothesis_1 |\
+      \ trim('.?!') }}?\n|||\n{{ answer_choices[label-1] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hyp1_1
+    reference: ''
+  bf8a5b8a-70cb-4b27-82db-8ca4fbd2318d: !Template
+    answer_choices: null
+    id: bf8a5b8a-70cb-4b27-82db-8ca4fbd2318d
+    jinja: '{{ observation_1 }} {{ observation_2 }}
+
+
+      Would you rather believe that:
+
+
+      {{ hypothesis_1  | trim(''.?!'') }},
+
+
+      or:
+
+
+      {{ hypothesis_2  | trim(''.?!'') }}?
+
+      |||
+
+      {{ [hypothesis_1, hypothesis_2][label-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp3
+    reference: ''
+  c0fc2e80-063f-4f8a-ad5d-c7603ed74883: !Template
+    answer_choices: null
+    id: c0fc2e80-063f-4f8a-ad5d-c7603ed74883
+    jinja: "Which of the following better fits the description?\n\nIs it that: \n\n\
+      {{ hypothesis_2  | trim('.?!') }},\n\nor rather: \n\n{{ hypothesis_1  | trim('.?!')\
+      \ }}?\n\nDescription: \n\n{{ observation_1 }} {{ observation_2 }}\n|||\n{{ [hypothesis_1,\
+      \ hypothesis_2][label-1] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp6_reversed
+    reference: ''
+  d418b574-9d0a-4d29-a518-7d9a5f5a4a3d: !Template
+    answer_choices: null
+    id: d418b574-9d0a-4d29-a518-7d9a5f5a4a3d
+    jinja: "Which of the following better fits the description?\n\nIs it that: \n\n\
+      {{ hypothesis_1  | trim('.?!') }},\n\nor rather: \n\n{{ hypothesis_2  | trim('.?!')\
+      \ }}?\n\nDescription: \n\n{{ observation_1 }} {{ observation_2 }}\n|||\n{{ [hypothesis_1,\
+      \ hypothesis_2][label-1] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp6
+    reference: ''
+  e4442077-bc1b-40eb-831f-a19971f810d7: !Template
+    answer_choices: Yes ||| No
+    id: e4442077-bc1b-40eb-831f-a19971f810d7
+    jinja: "Does this statement: \n\n{{ hypothesis_1  | trim('.?!') }} \n\nexplain\
+      \ the situation described below? \n\n{{ observation_1 }}\n{{ observation_2 }}\n\
+      |||\n{{ answer_choices[label-1] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hyp1_2
+    reference: ''
+  e90f1ef2-e6cd-4bfa-a697-a6d9e1077cee: !Template
+    answer_choices: null
+    id: e90f1ef2-e6cd-4bfa-a697-a6d9e1077cee
+    jinja: "We know that:\n\n{{ observation_1  | trim('.?!') }},\n\nand:\n\n{{ observation_2\
+      \ }} \n\nWhat is more likely?\n\nFirst option: \n\n{{ hypothesis_2  | trim('.?!')\
+      \ }}, \n\nor second option:\n\n{{ hypothesis_1  | trim('.?!') }}?\n|||\n{{ [hypothesis_1,\
+      \ hypothesis_2][label-1]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp4_reversed
+    reference: ''
+  eb0baa43-3c79-4d1d-973a-37e0055bbfec: !Template
+    answer_choices: null
+    id: eb0baa43-3c79-4d1d-973a-37e0055bbfec
+    jinja: 'Which version is more accurate?
+
+
+      The first one:
+
+
+      {{ hypothesis_1  | trim(''.?!'') }},
+
+
+      or the second one:
+
+
+      {{ hypothesis_2  | trim(''.?!'') }}?
+
+
+      Assuming that:
+
+
+      {{ observation_1 }} {{ observation_2 }}
+
+      |||
+
+      {{ [hypothesis_1, hypothesis_2][label-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: hyp5
+    reference: ''
diff --git a/promptsource/templates/asnq/templates.yaml b/promptsource/templates/asnq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f559f65949f8c0d13a7a9ba1f6c87b229e8d3b14
--- /dev/null
+++ b/promptsource/templates/asnq/templates.yaml
@@ -0,0 +1,118 @@
+dataset: asnq
+templates:
+  55f386ba-9a86-405e-a805-152e254a4205: !Template
+    answer_choices: null
+    id: 55f386ba-9a86-405e-a805-152e254a4205
+    jinja: "{% if label == 1 %}\n\nWhat is a question that someone might ask that\
+      \ the following sentence can answer?\n\n {{sentence}}\n\n|||\n\n{{question}}\n\
+      {% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Sentence question generation 2
+    reference: ''
+  5b6abb0a-1b4f-4338-aab6-430465669164: !Template
+    answer_choices: null
+    id: 5b6abb0a-1b4f-4338-aab6-430465669164
+    jinja: '{% if label == 1 %}
+
+
+      Write a question based on this sentence: {{sentence}}
+
+
+      |||
+
+
+      {{question}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentence question generation
+    reference: ''
+  859ec580-957b-42da-be1b-c3ccb8b52d24: !Template
+    answer_choices: null
+    id: 859ec580-957b-42da-be1b-c3ccb8b52d24
+    jinja: '{% if label == 1 %}
+
+
+      Generate a one-sentence answer to the following question: {{question}}?
+
+
+      |||
+
+
+      {{sentence}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: answer question with a sentence 3
+    reference: ''
+  85da6666-9e50-4122-84c8-d00b90967475: !Template
+    answer_choices: null
+    id: 85da6666-9e50-4122-84c8-d00b90967475
+    jinja: '{% if label == 1 %}
+
+
+      I was wondering, {{question}}? Can you give me a full sentence answer?
+
+
+      |||
+
+
+      {{sentence}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: answer question with a sentence 2
+    reference: ''
+  85fe8aaa-83c5-41ec-ada5-0e6d60bab1f9: !Template
+    answer_choices: null
+    id: 85fe8aaa-83c5-41ec-ada5-0e6d60bab1f9
+    jinja: '{% if label == 1 %}
+
+
+      Answer this question as a full sentence: {{question}}?
+
+
+      |||
+
+
+      {{sentence}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer question as a sentence
+    reference: ''
+  a36d6152-72c4-4278-8266-d27b28667f61: !Template
+    answer_choices: null
+    id: a36d6152-72c4-4278-8266-d27b28667f61
+    jinja: "{% if label == 1 %}\n\nHere is a sentence:\n\n {{sentence}}\n\nWrite a\
+      \ question that this sentence is an answer to.\n\n|||\n\n{{question}}\n{% endif\
+      \ %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Sentence question generation 3
+    reference: ''
diff --git a/promptsource/templates/asset/ratings/templates.yaml b/promptsource/templates/asset/ratings/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f4f660dc7166beac73fb3f8fb0875c041c8f07e6
--- /dev/null
+++ b/promptsource/templates/asset/ratings/templates.yaml
@@ -0,0 +1,56 @@
+dataset: asset
+subset: ratings
+templates:
+  09b2a13b-cba6-4473-8a46-3fa24be71ce2: !Template
+    answer_choices: null
+    id: 09b2a13b-cba6-4473-8a46-3fa24be71ce2
+    jinja: "{% set questions= [ \"Does the second sentence better convey the information?\"\
+      ,  \"Is the second sentence more fluent?\", \"Is the second sentence easier\
+      \ to understand?\"] %}\n\nFirst sentence: {{original}}\n\nSecond sentence: {{simplification}}\n\
+      \n{{questions[aspect]}} \n\n|||\n\n{% if rating > 50 %}\n    Yes\n{% else %}\n\
+      \    No\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: asset_ratings1
+    reference: Taking questions from the original paper, we use the rating to establish
+      a binary classification problem.
+  47142040-4121-4144-98b9-61cb5cbb1313: !Template
+    answer_choices: null
+    id: 47142040-4121-4144-98b9-61cb5cbb1313
+    jinja: 'First sentence: {{original}}
+
+
+      Second sentence: {{simplification}}
+
+
+      I am scoring these simplification exercises. How much easier to read is the
+      second sentence on a scale from 0 (harder to read) to 100 (easier to read)?
+
+
+      |||
+
+
+      {{rating}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: asset_ratings3
+    reference: Prompt the model to rate how simplified the sentence is in a general
+      sense, instead of a particular aspect.
+  d2bed959-29ab-4962-a106-dc91c00f3f03: !Template
+    answer_choices: null
+    id: d2bed959-29ab-4962-a106-dc91c00f3f03
+    jinja: "{% set statements= [ \"the second sentence expresses the underlying meaning\
+      \ the best.\",  \"the second sentence is more fluent.\", \"the second sentence\
+      \ is easier to read and understand.\"] %}\n\nFirst sentence: {{original}}\n\n\
+      Second sentence: {{simplification}}\n\nRate the following statement from 0 (strongly\
+      \ disagree) to 100 (strongly agree): {{statements[aspect]}} \n\n|||\n\n{{rating}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: asset_ratings2
+    reference: Require the model to output the rating
diff --git a/promptsource/templates/asset/simplification/templates.yaml b/promptsource/templates/asset/simplification/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5ccaa093dfa7f9eb1acd6671764cb094903a7ec8
--- /dev/null
+++ b/promptsource/templates/asset/simplification/templates.yaml
@@ -0,0 +1,41 @@
+dataset: asset
+subset: simplification
+templates:
+  0f0e55f9-28b4-4844-b65d-b9544a0918eb: !Template
+    answer_choices: null
+    id: 0f0e55f9-28b4-4844-b65d-b9544a0918eb
+    jinja: "{{original}}\n\nHow would I say this in another way? \n\n|||\n\n{{simplifications\
+      \ | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: asset_simplification1
+    reference: Rewrite text using one random simplification
+  3cbfbc1c-6876-4dd7-b7db-45fb3233a667: !Template
+    answer_choices: null
+    id: 3cbfbc1c-6876-4dd7-b7db-45fb3233a667
+    jinja: "{{simplifications | choice}}\n\nHow would I say this in another way? \n\
+      \n|||\n\n{{original}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: asset_simplification2
+    reference: Find the original text from the simplification
+  d528d74b-bbc2-4888-ae21-db0ab37304df: !Template
+    answer_choices: null
+    id: d528d74b-bbc2-4888-ae21-db0ab37304df
+    jinja: 'I''d like to explain to my child "{{original}}". How would I do so?
+
+
+      |||
+
+
+      {{simplifications | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: asset_simplification3
+    reference: Implicit simplification request
diff --git a/promptsource/templates/banking77/templates.yaml b/promptsource/templates/banking77/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4f2528e3d604a31ee48a849816164687303572ed
--- /dev/null
+++ b/promptsource/templates/banking77/templates.yaml
@@ -0,0 +1,269 @@
+dataset: banking77
+templates:
+  0dba8abc-248a-44db-bb86-20492ffc17f6: !Template
+    answer_choices: null
+    id: 0dba8abc-248a-44db-bb86-20492ffc17f6
+    jinja: "Which help page can be provided to provide information regarding this\
+      \ query?\n\n{{text}} |||\n{{\n[\n  \"activate_my_card\",\n  \"age_limit\",\n\
+      \  \"apple_pay_or_google_pay\",\n  \"atm_support\",\n  \"automatic_top_up\"\
+      ,\n  \"balance_not_updated_after_bank_transfer\",\n  \"balance_not_updated_after_cheque_or_cash_deposit\"\
+      ,\n  \"beneficiary_not_allowed\",\n  \"cancel_transfer\",\n  \"card_about_to_expire\"\
+      ,\n  \"card_acceptance\",\n  \"card_arrival\",\n  \"card_delivery_estimate\"\
+      ,\n  \"card_linking\",\n  \"card_not_working\",\n  \"card_payment_fee_charged\"\
+      ,\n  \"card_payment_not_recognised\",\n  \"card_payment_wrong_exchange_rate\"\
+      ,\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\",\n  \"cash_withdrawal_not_recognised\"\
+      ,\n  \"change_pin\",\n  \"compromised_card\",\n  \"contactless_not_working\"\
+      ,\n  \"country_support\",\n  \"declined_card_payment\",\n  \"declined_cash_withdrawal\"\
+      ,\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\",\n  \"\
+      disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n] [label].replace(\"_\", \"\
+      \ \")\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: topic
+    reference: ''
+  2520f6d0-fcdf-44b6-abb3-a76e44948047: !Template
+    answer_choices: null
+    id: 2520f6d0-fcdf-44b6-abb3-a76e44948047
+    jinja: "To which department in the bank can this query be directed?\n\n{{text}}\
+      \ |||\n{{\n[\n  \"activate_my_card\",\n  \"age_limit\",\n  \"apple_pay_or_google_pay\"\
+      ,\n  \"atm_support\",\n  \"automatic_top_up\",\n  \"balance_not_updated_after_bank_transfer\"\
+      ,\n  \"balance_not_updated_after_cheque_or_cash_deposit\",\n  \"beneficiary_not_allowed\"\
+      ,\n  \"cancel_transfer\",\n  \"card_about_to_expire\",\n  \"card_acceptance\"\
+      ,\n  \"card_arrival\",\n  \"card_delivery_estimate\",\n  \"card_linking\",\n\
+      \  \"card_not_working\",\n  \"card_payment_fee_charged\",\n  \"card_payment_not_recognised\"\
+      ,\n  \"card_payment_wrong_exchange_rate\",\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\"\
+      ,\n  \"cash_withdrawal_not_recognised\",\n  \"change_pin\",\n  \"compromised_card\"\
+      ,\n  \"contactless_not_working\",\n  \"country_support\",\n  \"declined_card_payment\"\
+      ,\n  \"declined_cash_withdrawal\",\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\"\
+      ,\n  \"disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n] [label] | replace(\"_\"\
+      , \" \")\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: department
+    reference: ''
+  9482bce0-f201-451b-9384-af588d707629: !Template
+    answer_choices: null
+    id: 9482bce0-f201-451b-9384-af588d707629
+    jinja: "\n{% set li =  [  \"activate_my_card\",\n  \"age_limit\",\n  \"apple_pay_or_google_pay\"\
+      ,\n  \"atm_support\",\n  \"automatic_top_up\",\n  \"balance_not_updated_after_bank_transfer\"\
+      ,\n  \"balance_not_updated_after_cheque_or_cash_deposit\",\n  \"beneficiary_not_allowed\"\
+      ,\n  \"cancel_transfer\",\n  \"card_about_to_expire\",\n  \"card_acceptance\"\
+      ,\n  \"card_arrival\",\n  \"card_delivery_estimate\",\n  \"card_linking\",\n\
+      \  \"card_not_working\",\n  \"card_payment_fee_charged\",\n  \"card_payment_not_recognised\"\
+      ,\n  \"card_payment_wrong_exchange_rate\",\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\"\
+      ,\n  \"cash_withdrawal_not_recognised\",\n  \"change_pin\",\n  \"compromised_card\"\
+      ,\n  \"contactless_not_working\",\n  \"country_support\",\n  \"declined_card_payment\"\
+      ,\n  \"declined_cash_withdrawal\",\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\"\
+      ,\n  \"disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n] %}\n\nTo which department\
+      \ ({{li|join(\", \")|replace(\"_\", \" \")}}) in the bank can this query be\
+      \ directed?\n\n{{text}} |||\n{{ li [label] | replace(\"_\", \" \")}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: department_options
+    reference: ''
+  e629d77c-46f9-4e00-b23a-c522d07a9943: !Template
+    answer_choices: null
+    id: e629d77c-46f9-4e00-b23a-c522d07a9943
+    jinja: "Summarise the following query in the form of key banking terms\n\n{{text}}\
+      \ |||\n{{\n[\n  \"activate_my_card\",\n  \"age_limit\",\n  \"apple_pay_or_google_pay\"\
+      ,\n  \"atm_support\",\n  \"automatic_top_up\",\n  \"balance_not_updated_after_bank_transfer\"\
+      ,\n  \"balance_not_updated_after_cheque_or_cash_deposit\",\n  \"beneficiary_not_allowed\"\
+      ,\n  \"cancel_transfer\",\n  \"card_about_to_expire\",\n  \"card_acceptance\"\
+      ,\n  \"card_arrival\",\n  \"card_delivery_estimate\",\n  \"card_linking\",\n\
+      \  \"card_not_working\",\n  \"card_payment_fee_charged\",\n  \"card_payment_not_recognised\"\
+      ,\n  \"card_payment_wrong_exchange_rate\",\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\"\
+      ,\n  \"cash_withdrawal_not_recognised\",\n  \"change_pin\",\n  \"compromised_card\"\
+      ,\n  \"contactless_not_working\",\n  \"country_support\",\n  \"declined_card_payment\"\
+      ,\n  \"declined_cash_withdrawal\",\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\"\
+      ,\n  \"disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n][label].replace(\"_\", \"\
+      \ \")\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: rephrase
+    reference: ''
+  edd67883-0386-4496-af7f-37a44c41293f: !Template
+    answer_choices: null
+    id: edd67883-0386-4496-af7f-37a44c41293f
+    jinja: "\n{% set li =  [  \"activate_my_card\",\n  \"age_limit\",\n  \"apple_pay_or_google_pay\"\
+      ,\n  \"atm_support\",\n  \"automatic_top_up\",\n  \"balance_not_updated_after_bank_transfer\"\
+      ,\n  \"balance_not_updated_after_cheque_or_cash_deposit\",\n  \"beneficiary_not_allowed\"\
+      ,\n  \"cancel_transfer\",\n  \"card_about_to_expire\",\n  \"card_acceptance\"\
+      ,\n  \"card_arrival\",\n  \"card_delivery_estimate\",\n  \"card_linking\",\n\
+      \  \"card_not_working\",\n  \"card_payment_fee_charged\",\n  \"card_payment_not_recognised\"\
+      ,\n  \"card_payment_wrong_exchange_rate\",\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\"\
+      ,\n  \"cash_withdrawal_not_recognised\",\n  \"change_pin\",\n  \"compromised_card\"\
+      ,\n  \"contactless_not_working\",\n  \"country_support\",\n  \"declined_card_payment\"\
+      ,\n  \"declined_cash_withdrawal\",\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\"\
+      ,\n  \"disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n] %}\n\nWhich intent ({{ li|join(\"\
+      , \")|replace(\"_\", \" \")}}) best represents this banking query?\n\n{{text}}\
+      \ |||\n{{\nli [label] | replace(\"_\", \" \")\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: intent_options
+    reference: ''
+  eee2366a-8f0c-4ac3-b9cc-aa038e40f8cb: !Template
+    answer_choices: null
+    id: eee2366a-8f0c-4ac3-b9cc-aa038e40f8cb
+    jinja: "What is the intent of this banking query?\n\n{{text}} |||\n{{\n[\n  \"\
+      activate_my_card\",\n  \"age_limit\",\n  \"apple_pay_or_google_pay\",\n  \"\
+      atm_support\",\n  \"automatic_top_up\",\n  \"balance_not_updated_after_bank_transfer\"\
+      ,\n  \"balance_not_updated_after_cheque_or_cash_deposit\",\n  \"beneficiary_not_allowed\"\
+      ,\n  \"cancel_transfer\",\n  \"card_about_to_expire\",\n  \"card_acceptance\"\
+      ,\n  \"card_arrival\",\n  \"card_delivery_estimate\",\n  \"card_linking\",\n\
+      \  \"card_not_working\",\n  \"card_payment_fee_charged\",\n  \"card_payment_not_recognised\"\
+      ,\n  \"card_payment_wrong_exchange_rate\",\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\"\
+      ,\n  \"cash_withdrawal_not_recognised\",\n  \"change_pin\",\n  \"compromised_card\"\
+      ,\n  \"contactless_not_working\",\n  \"country_support\",\n  \"declined_card_payment\"\
+      ,\n  \"declined_cash_withdrawal\",\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\"\
+      ,\n  \"disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n] [label].replace(\"_\", \"\
+      \ \")\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: intent
+    reference: ''
+  f4e80455-1523-4b91-aacc-249d8c6f0f2a: !Template
+    answer_choices: null
+    id: f4e80455-1523-4b91-aacc-249d8c6f0f2a
+    jinja: "Generate the subject for the email containing this query:\n\n{{text}}\
+      \ |||\n{{\n[\n  \"activate_my_card\",\n  \"age_limit\",\n  \"apple_pay_or_google_pay\"\
+      ,\n  \"atm_support\",\n  \"automatic_top_up\",\n  \"balance_not_updated_after_bank_transfer\"\
+      ,\n  \"balance_not_updated_after_cheque_or_cash_deposit\",\n  \"beneficiary_not_allowed\"\
+      ,\n  \"cancel_transfer\",\n  \"card_about_to_expire\",\n  \"card_acceptance\"\
+      ,\n  \"card_arrival\",\n  \"card_delivery_estimate\",\n  \"card_linking\",\n\
+      \  \"card_not_working\",\n  \"card_payment_fee_charged\",\n  \"card_payment_not_recognised\"\
+      ,\n  \"card_payment_wrong_exchange_rate\",\n  \"card_swallowed\",\n  \"cash_withdrawal_charge\"\
+      ,\n  \"cash_withdrawal_not_recognised\",\n  \"change_pin\",\n  \"compromised_card\"\
+      ,\n  \"contactless_not_working\",\n  \"country_support\",\n  \"declined_card_payment\"\
+      ,\n  \"declined_cash_withdrawal\",\n  \"declined_transfer\",\n  \"direct_debit_payment_not_recognised\"\
+      ,\n  \"disposable_card_limits\",\n  \"edit_personal_details\",\n  \"exchange_charge\"\
+      ,\n  \"exchange_rate\",\n  \"exchange_via_app\",\n  \"extra_charge_on_statement\"\
+      ,\n  \"failed_transfer\",\n  \"fiat_currency_support\",\n  \"get_disposable_virtual_card\"\
+      ,\n  \"get_physical_card\",\n  \"getting_spare_card\",\n  \"getting_virtual_card\"\
+      ,\n  \"lost_or_stolen_card\",\n  \"lost_or_stolen_phone\",\n  \"order_physical_card\"\
+      ,\n  \"passcode_forgotten\",\n  \"pending_card_payment\",\n  \"pending_cash_withdrawal\"\
+      ,\n  \"pending_top_up\",\n  \"pending_transfer\",\n  \"pin_blocked\",\n  \"\
+      receiving_money\",\n  \"Refund_not_showing_up\",\n  \"request_refund\",\n  \"\
+      reverted_card_payment?\",\n  \"supported_cards_and_currencies\",\n  \"terminate_account\"\
+      ,\n  \"top_up_by_bank_transfer_charge\",\n  \"top_up_by_card_charge\",\n  \"\
+      top_up_by_cash_or_cheque\",\n  \"top_up_failed\",\n  \"top_up_limits\",\n  \"\
+      top_up_reverted\",\n  \"topping_up_by_card\",\n  \"transaction_charged_twice\"\
+      ,\n  \"transfer_fee_charged\",\n  \"transfer_into_account\",\n  \"transfer_not_received_by_recipient\"\
+      ,\n  \"transfer_timing\",\n  \"unable_to_verify_identity\",\n  \"verify_my_identity\"\
+      ,\n  \"verify_source_of_funds\",\n  \"verify_top_up\",\n  \"virtual_card_not_working\"\
+      ,\n  \"visa_or_mastercard\",\n  \"why_verify_identity\",\n  \"wrong_amount_of_cash_received\"\
+      ,\n  \"wrong_exchange_rate_for_cash_withdrawal\"\n][label].replace(\"_\", \"\
+      \ \")\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate_subject
+    reference: ''
diff --git a/promptsource/templates/billsum/templates.yaml b/promptsource/templates/billsum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..91655473d6514804dc18ba5ce1b4fc1b07899470
--- /dev/null
+++ b/promptsource/templates/billsum/templates.yaml
@@ -0,0 +1,104 @@
+dataset: billsum
+templates:
+  3ac01292-4a54-4546-b4e6-c225ae114213: !Template
+    answer_choices: null
+    id: 3ac01292-4a54-4546-b4e6-c225ae114213
+    jinja: 'Summarize: {{text}}|||
+
+      Title: {{title}}
+
+      Summary: {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Summarize: (text -> title, summary)'
+    reference: ''
+  3c790ac3-0557-47a9-9b71-1cb435f15629: !Template
+    answer_choices: null
+    id: 3c790ac3-0557-47a9-9b71-1cb435f15629
+    jinja: 'Summarize this bill: {{text}} |||
+
+      {{title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Summarize this bill in one sentence: (text -> title)'
+    reference: ''
+  438192e5-d67a-4098-9d82-a9fe892f6be2: !Template
+    answer_choices: null
+    id: 438192e5-d67a-4098-9d82-a9fe892f6be2
+    jinja: 'Write a bill: {{summary}} |||
+
+      {{text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Write a bill: (summary -> text)'
+    reference: ''
+  4891a8e7-258c-41e2-80d3-0c1a054acb07: !Template
+    answer_choices: null
+    id: 4891a8e7-258c-41e2-80d3-0c1a054acb07
+    jinja: 'Write a bill: {{title}} |||
+
+      {{text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Write a bill: (title -> text)'
+    reference: ''
+  550fa161-af4e-4430-9844-ce7dad587733: !Template
+    answer_choices: null
+    id: 550fa161-af4e-4430-9844-ce7dad587733
+    jinja: 'Summarize this bill: {{text}} |||
+
+      {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Summarize this bill: (text -> summary)'
+    reference: ''
+  5d2404b9-63ff-406e-977d-eda6afb5c689: !Template
+    answer_choices: null
+    id: 5d2404b9-63ff-406e-977d-eda6afb5c689
+    jinja: '{{summary}}
+
+      ===
+
+      Generate title from summary:
+
+      |||{{title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate title from summary
+    reference: ''
+  6a439a80-4924-49e9-b5ae-f661683b399f: !Template
+    answer_choices: null
+    id: 6a439a80-4924-49e9-b5ae-f661683b399f
+    jinja: 'Summarize: {{text}}
+
+      |||{{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Summarize: (text -> summary)'
+    reference: ''
+  ea9f0376-6cec-450c-b258-89f479cb9f6d: !Template
+    answer_choices: null
+    id: ea9f0376-6cec-450c-b258-89f479cb9f6d
+    jinja: 'Summarize: {{summary}}
+
+      |||{{title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: 'Summarize: (summary -> title)'
+    reference: ''
diff --git a/promptsource/templates/bing_coronavirus_query_set/templates.yaml b/promptsource/templates/bing_coronavirus_query_set/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4c3f008b8bf01d471ff3964d1685aabd8e3d8414
--- /dev/null
+++ b/promptsource/templates/bing_coronavirus_query_set/templates.yaml
@@ -0,0 +1,72 @@
+dataset: bing_coronavirus_query_set
+templates:
+  43332782-9e92-4bb2-94bf-28759f3fe181: !Template
+    answer_choices: null
+    id: 43332782-9e92-4bb2-94bf-28759f3fe181
+    jinja: "This search query talks about the coronavirus and was published on {{Date}}.\
+      \ In what country was it issued?\n{{Query}}\n|||\n{{Country}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: what_country
+    reference: ''
+  68f9c063-1907-4866-ab1b-756cc57e5695: !Template
+    answer_choices: null
+    id: 68f9c063-1907-4866-ab1b-756cc57e5695
+    jinja: "The user is searching for coronavirus results on Bing.com. Is the intent\
+      \ implicit or explicit?\n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\"\
+      \ %}\nimplicit\n{% else %}\nexplicit\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: is_implicit_or_explicit
+    reference: ''
+  992d541f-9e0c-466d-b4c4-92e9e236f863: !Template
+    answer_choices: null
+    id: 992d541f-9e0c-466d-b4c4-92e9e236f863
+    jinja: "This search query about coronavirus was issued in {{Country}} on {{Date}}.\
+      \ Is the intent implicit or explicit?\n{{Query}}\n|||\n{% if IsImplicitIntent\
+      \ == \"True\" %}\nimplicit\n{% else %}\nexplicit\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: is_explicit_country_date
+    reference: ''
+  d4a251d7-0e23-4feb-8bf2-18e32c553199: !Template
+    answer_choices: null
+    id: d4a251d7-0e23-4feb-8bf2-18e32c553199
+    jinja: "On what date was this search engine query  issued, during the Covid-19\
+      \ pandemic ? \n{{Query}}\n|||\n{{Date}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: what_date
+    reference: ''
+  df53652c-36dc-45fe-a015-d0781e32cd33: !Template
+    answer_choices: null
+    id: df53652c-36dc-45fe-a015-d0781e32cd33
+    jinja: "Does this search engine query have an indirect relation to Covid-19 ?\
+      \ \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\nYes\n{% else %}\n\
+      No\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: is_implicit_query
+    reference: ''
+  df7bc2ee-686c-4826-ad84-3a056a2da4d4: !Template
+    answer_choices: null
+    id: df7bc2ee-686c-4826-ad84-3a056a2da4d4
+    jinja: "Does this search query on Bing.com talk about the  coronavirus explicitly\
+      \ ? \n{{Query}}\n|||\n{% if IsImplicitIntent == \"True\" %}\nNo\n{% else %}\n\
+      Yes\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: is_explicit_query
+    reference: ''
diff --git a/promptsource/templates/blended_skill_talk/templates.yaml b/promptsource/templates/blended_skill_talk/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b3495979cf0b06abb10b1192d48d7989e3c7f15f
--- /dev/null
+++ b/promptsource/templates/blended_skill_talk/templates.yaml
@@ -0,0 +1,46 @@
+dataset: blended_skill_talk
+templates:
+  54f785e9-453a-4ffe-8181-28095e3f2b80: !Template
+    answer_choices: null
+    id: 54f785e9-453a-4ffe-8181-28095e3f2b80
+    jinja: "Given the below conversation between two people, what would the listener\
+      \ say?\n\nA: {{previous_utterance[0]}}\n\nB: {{previous_utterance[1]}}\n\n{%\
+      \ for message_f, message_g in zip(free_messages[:-1], guided_messages[:-1])\
+      \ %}\nA: {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \n\nA: {{free_messages[-1]}}\n\
+      \nB: \n|||\n{{guided_messages[-1]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: guess-last-utterance
+    reference: ''
+  58f4e068-26fa-4843-a1d6-54bde324e780: !Template
+    answer_choices: null
+    id: 58f4e068-26fa-4843-a1d6-54bde324e780
+    jinja: "Two people are having a conversation. Are the utterances in the correct\
+      \ order?\n{% if range(0, 2) | choice %}\nA: {{previous_utterance[0]}}\n\nB:\
+      \ {{previous_utterance[1]}}\n\n{% for message_f, message_g in zip(free_messages,\
+      \ guided_messages) %}\nA: {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \n\
+      \n|||\nYes, they are.\n{% else %}\nA: {{previous_utterance[1]}}\n\nB: {{previous_utterance[0]}}\n\
+      \n{% for message_f, message_g in zip(guided_messages, free_messages) %}\nA:\
+      \ {{message_f}}\n\nB: {{message_g}}\n{% endfor %} \n\n|||\nNo, they are not.\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: guess-correct-order
+    reference: ''
+  8792b63e-7217-40fe-8130-7392baca3519: !Template
+    answer_choices: null
+    id: 8792b63e-7217-40fe-8130-7392baca3519
+    jinja: "Two people are talking to each other.  What do you think Person A said\
+      \ in the beginning?\n\nPerson B: {{previous_utterance[1]}}\n\n{% for message_f,\
+      \ message_g in zip(free_messages, guided_messages) %}\nPerson A: {{message_f}}\n\
+      \nPerson B: {{message_g}}\n{% endfor %} \n|||\n{{previous_utterance[0]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: guess-first-utterance
+    reference: ''
diff --git a/promptsource/templates/boolq/templates.yaml b/promptsource/templates/boolq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4676074c04975b03c37dd723baf77de63ec142e4
--- /dev/null
+++ b/promptsource/templates/boolq/templates.yaml
@@ -0,0 +1,99 @@
+dataset: boolq
+templates:
+  9bd5fbaa-e7a2-4847-a7a1-500591d90bb4: !Template
+    answer_choices: null
+    id: 9bd5fbaa-e7a2-4847-a7a1-500591d90bb4
+    jinja: '{{passage}} {{question}}? |||
+
+      {% if answer == true %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: LM style
+    reference: Concatenate passage and question. Transform True/False into Yes/No.
+  c746b16d-212d-4f1f-9988-9fee99584f25: !Template
+    answer_choices: null
+    id: c746b16d-212d-4f1f-9988-9fee99584f25
+    jinja: '{{passage}}
+
+      Question: {{question}}?
+
+      Answer: |||
+
+      {% if answer == true %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Boolq GPT3
+    reference: Taken from GPT3 - Figure G29
+  dc7caf4f-b109-4a82-86a0-2798a5437283: !Template
+    answer_choices: null
+    id: dc7caf4f-b109-4a82-86a0-2798a5437283
+    jinja: '{{passage}}
+
+      {{question}}?
+
+      Answer by yes or no. |||
+
+      {% if answer == true %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: yes/no
+    reference: Yes or no
+  fbba0375-4220-4483-8bbe-0fd630330611: !Template
+    answer_choices: null
+    id: fbba0375-4220-4483-8bbe-0fd630330611
+    jinja: 'Answer the question based on the passage.
+
+      ===
+
+      Question: {{question}}?
+
+      Passage: {{passage}}
+
+      Answer: |||
+
+      {% if answer == true %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Exercise style
+    reference: Prompt in the style of task description + instance. Mapped True/False
+      to Yes/No.
diff --git a/promptsource/templates/cbt/CN/templates.yaml b/promptsource/templates/cbt/CN/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df3d9e3658ac90ecaccdc2cc1cc3dcf67f3c6e4c
--- /dev/null
+++ b/promptsource/templates/cbt/CN/templates.yaml
@@ -0,0 +1,45 @@
+dataset: cbt
+subset: CN
+templates:
+  0725fe5e-1bba-4e08-a448-9e0038164914: !Template
+    answer_choices: null
+    id: 0725fe5e-1bba-4e08-a448-9e0038164914
+    jinja: 'Write the next sentence of this story: {{sentences | join('''')}}
+
+      |||
+
+      {{ question.replace("XXXXX", answer) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: next_sentence_generation
+    reference: Generate the next sentence given the story.
+  2c326181-dbba-401e-accb-d84ea0162f0a: !Template
+    answer_choices: null
+    id: 2c326181-dbba-401e-accb-d84ea0162f0a
+    jinja: 'Read the passage and fill in the XXXXX:
+
+      {{ sentences | join('''') }} {{question}}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_prediction
+    reference: Fill in the blank without options.
+  b26cae56-1fbd-47a5-8c8d-d981ca098239: !Template
+    answer_choices: null
+    id: b26cae56-1fbd-47a5-8c8d-d981ca098239
+    jinja: "Which of the following options replaces XXXXX the best?\n{{ options |\
+      \ join (\", \") }}\nin this story: \n{{sentences | join ('')}} {{question}}\n\
+      |||\n{{ answer }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multi_choice
+    reference: Given the sentences, fill in the blank using the options.
diff --git a/promptsource/templates/cbt/NE/templates.yaml b/promptsource/templates/cbt/NE/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2955bd4b4a275a41bee92a10b6a50d86b7230ff7
--- /dev/null
+++ b/promptsource/templates/cbt/NE/templates.yaml
@@ -0,0 +1,45 @@
+dataset: cbt
+subset: NE
+templates:
+  1fd986ce-e44d-4f32-bbb8-f5d4d3d930d9: !Template
+    answer_choices: null
+    id: 1fd986ce-e44d-4f32-bbb8-f5d4d3d930d9
+    jinja: "Which of the following options replaces XXXXX the best?\n{{ options |\
+      \ join (\", \") }}\nin this story: \n{{sentences | join ('')}} {{question}}\n\
+      |||\n{{ answer }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multi_choice
+    reference: Given the sentences, fill in the blank using the options.
+  3c56e28d-668a-42d0-8976-93864e38bc4c: !Template
+    answer_choices: null
+    id: 3c56e28d-668a-42d0-8976-93864e38bc4c
+    jinja: 'Read the passage and fill in the XXXXX:
+
+      {{ sentences | join('''') }} {{question}}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_prediction
+    reference: Fill in the blank without options.
+  d2f4dcdd-232e-4e56-a9e1-1aed294e651f: !Template
+    answer_choices: null
+    id: d2f4dcdd-232e-4e56-a9e1-1aed294e651f
+    jinja: 'Write the next sentence of this story: {{sentences | join('''')}}
+
+      |||
+
+      {{ question.replace("XXXXX", answer) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: next_sentence_generation
+    reference: Generate the next sentence given the story.
diff --git a/promptsource/templates/cbt/P/templates.yaml b/promptsource/templates/cbt/P/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a8187bc02f4f206232a6d654f6fc106981122078
--- /dev/null
+++ b/promptsource/templates/cbt/P/templates.yaml
@@ -0,0 +1,45 @@
+dataset: cbt
+subset: P
+templates:
+  0c217578-64bb-431d-af5b-8944582a49f2: !Template
+    answer_choices: null
+    id: 0c217578-64bb-431d-af5b-8944582a49f2
+    jinja: 'Read the passage and fill in the XXXXX:
+
+      {{ sentences | join('''') }} {{question}}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_prediction
+    reference: Fill in the blank without options.
+  3753a293-98ba-4f98-9bb9-96b86aa0b719: !Template
+    answer_choices: null
+    id: 3753a293-98ba-4f98-9bb9-96b86aa0b719
+    jinja: "Which of the following options replaces XXXXX the best?\n{{ options |\
+      \ join (\", \") }}\nin this story: \n{{sentences | join ('')}} {{question}}\n\
+      |||\n{{ answer }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multi_choice
+    reference: Given the sentences, fill in the blank using the options.
+  e7a60793-f142-44e2-9fab-b39ba3236106: !Template
+    answer_choices: null
+    id: e7a60793-f142-44e2-9fab-b39ba3236106
+    jinja: 'Write the next sentence of this story: {{sentences | join('''')}}
+
+      |||
+
+      {{ question.replace("XXXXX", answer) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: next_sentence_generation
+    reference: Generate the next sentence given the story.
diff --git a/promptsource/templates/cbt/V/templates.yaml b/promptsource/templates/cbt/V/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67a2d61866eea24cf41a9ca6d9b81737e579b21f
--- /dev/null
+++ b/promptsource/templates/cbt/V/templates.yaml
@@ -0,0 +1,45 @@
+dataset: cbt
+subset: V
+templates:
+  08820238-5bb3-4c7c-98bb-ec3d81e432e7: !Template
+    answer_choices: null
+    id: 08820238-5bb3-4c7c-98bb-ec3d81e432e7
+    jinja: 'Write the next sentence of this story: {{sentences | join('''')}}
+
+      |||
+
+      {{ question.replace("XXXXX", answer) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: next_sentence_generation
+    reference: Generate the next sentence given the story.
+  63bfa7b6-b566-4693-848c-e05cd7a12a03: !Template
+    answer_choices: null
+    id: 63bfa7b6-b566-4693-848c-e05cd7a12a03
+    jinja: 'Read the passage and fill in the XXXXX:
+
+      {{ sentences | join('''') }} {{question}}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_prediction
+    reference: Fill in the blank without options.
+  a2e38459-90d9-4292-9d96-491ad7d4e3db: !Template
+    answer_choices: null
+    id: a2e38459-90d9-4292-9d96-491ad7d4e3db
+    jinja: "Which of the following options replaces XXXXX the best?\n{{ options |\
+      \ join (\", \") }}\nin this story: \n{{sentences | join ('')}} {{question}}\n\
+      |||\n{{ answer }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multi_choice
+    reference: Given the sentences, fill in the blank using the options.
diff --git a/promptsource/templates/cbt/raw/templates.yaml b/promptsource/templates/cbt/raw/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c127296d6e4e41552b6ef2d531f0b21b9b798935
--- /dev/null
+++ b/promptsource/templates/cbt/raw/templates.yaml
@@ -0,0 +1,32 @@
+dataset: cbt
+subset: raw
+templates:
+  2d9e9c74-550e-4838-8d1d-a804d74828f7: !Template
+    answer_choices: null
+    id: 2d9e9c74-550e-4838-8d1d-a804d74828f7
+    jinja: 'Write a story for this title: {{title.split(''___'')[1].split(''.'')[0].replace(''_'',''
+      '')}}
+
+      |||
+
+      {{ content }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: write_story
+    reference: Given the title, write a story.
+  f4e1d9bb-a43e-4c75-aa5d-4711090dd628: !Template
+    answer_choices: null
+    id: f4e1d9bb-a43e-4c75-aa5d-4711090dd628
+    jinja: 'Write a title for this story: {{ content }}
+
+      |||
+
+      {{title.split(''___'')[1].split(''.'')[0].replace(''_'','' '')}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: write_title
+    reference: Given the story, write a title.
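The `write_story` / `write_title` templates for the raw subset recover a readable title from the raw `title` field; the chained `split`/`replace` assumes the field looks like `Author_Name___story_title.txt` (a format inferred from the template logic, not documented here). A quick sketch with a hypothetical value:

```python
# Hypothetical raw CBT title; the Author___title.ext shape is an assumption
# read off the template's split('___') / split('.') chain.
title = "Andrew_Lang___prince_prigio.txt"

# Mirrors {{ title.split('___')[1].split('.')[0].replace('_', ' ') }}
readable = title.split("___")[1].split(".")[0].replace("_", " ")
print(readable)  # -> "prince prigio"
```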
diff --git a/promptsource/templates/cc_news/templates.yaml b/promptsource/templates/cc_news/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..fdc0fd739ea88cf2882caabc94b4a80e71370c5c
--- /dev/null
+++ b/promptsource/templates/cc_news/templates.yaml
@@ -0,0 +1,208 @@
+dataset: cc_news
+templates:
+  0c630a0d-5eeb-46ea-ba15-f76f5d05a57d: !Template
+    answer_choices: null
+    id: 0c630a0d-5eeb-46ea-ba15-f76f5d05a57d
+    jinja: 'What could be the content of a news article with the following title and
+      summary?
+
+
+      Title: {{title}}
+
+
+      Summary: {{description}}
+
+      |||
+
+      {{text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: text_3
+    reference: ''
+  0c651168-8729-4a35-8c7c-5d812d4be790: !Template
+    answer_choices: null
+    id: 0c651168-8729-4a35-8c7c-5d812d4be790
+    jinja: "{{ text }} \n\nGive a brief description of the above text.\n|||\n{{ description\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: desc_c_q_1
+    reference: ''
+  11a681c3-8450-4064-aa08-ad3700b8b1bd: !Template
+    answer_choices: null
+    id: 11a681c3-8450-4064-aa08-ad3700b8b1bd
+    jinja: '{{ text }}
+
+
+      What title would you choose for the text above?
+
+      |||
+
+      {{ title }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: title_c_q_2
+    reference: ''
+  14aca5f0-89ae-4ae1-9746-7a68f6a0664f: !Template
+    answer_choices: null
+    id: 14aca5f0-89ae-4ae1-9746-7a68f6a0664f
+    jinja: 'Suggest the content of a news article entitled:
+
+
+      {{ title }},
+
+
+      regarding:
+
+
+      {{ description }}
+
+      |||
+
+      {{ text }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: text_1
+    reference: ''
+  319a6d41-d6bb-4f8f-ba1b-085a45b3eddd: !Template
+    answer_choices: null
+    id: 319a6d41-d6bb-4f8f-ba1b-085a45b3eddd
+    jinja: "Write a brief summary of the text below: \n\n{{ text }}\n|||\n{{ description\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: desc_q_c_3
+    reference: ''
+  5ca5100e-7aa6-48c0-9e78-48914739dc90: !Template
+    answer_choices: null
+    id: 5ca5100e-7aa6-48c0-9e78-48914739dc90
+    jinja: 'Use the description below to write a news article entitled:
+
+      {{ title }}.
+
+
+      Description: {{ description }}
+
+      |||
+
+      {{ text }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: text_4
+    reference: ''
+  7fd214bd-2403-42aa-850f-5255771e5609: !Template
+    answer_choices: null
+    id: 7fd214bd-2403-42aa-850f-5255771e5609
+    jinja: "Choose a title for the text below: \n\n{{ text }}\n|||\n{{ title }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: title_q_c_2
+    reference: ''
+  858a02bf-10c0-4284-886e-26a8859b2cc3: !Template
+    answer_choices: null
+    id: 858a02bf-10c0-4284-886e-26a8859b2cc3
+    jinja: '{{ text }}
+
+
+      Summarize the essential ideas of the above piece of news.
+
+      |||
+
+      {{ description }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: desc_c_q_2
+    reference: ''
+  a993713f-fd0e-4d62-99c0-e1313ab5c1c8: !Template
+    answer_choices: null
+    id: a993713f-fd0e-4d62-99c0-e1313ab5c1c8
+    jinja: "{{ text }} \n\nWhat title suits best the above piece of news?\n|||\n{{\
+      \ title }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: title_c_q_1
+    reference: ''
+  ae553815-f631-4e67-a6bc-6d8a21dedb25: !Template
+    answer_choices: null
+    id: ae553815-f631-4e67-a6bc-6d8a21dedb25
+    jinja: "Summarize the essential ideas of the following piece of news: \n\n{{ text\
+      \ }}\n|||\n{{ description }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: desc_q_c_2
+    reference: ''
+  b637cfd7-d4b8-420a-b60b-4fe0aa891000: !Template
+    answer_choices: null
+    id: b637cfd7-d4b8-420a-b60b-4fe0aa891000
+    jinja: 'Write a piece of news expanding on the following ideas:
+
+
+      {{ description }},
+
+
+      entitled:
+
+
+      {{ title }}
+
+      |||
+
+      {{ text }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: text_2
+    reference: ''
+  cc13d9b7-041a-4b29-b6c4-a6851a21fb46: !Template
+    answer_choices: null
+    id: cc13d9b7-041a-4b29-b6c4-a6851a21fb46
+    jinja: "Give this text a title: \n\n{{ text }}\n|||\n{{ title }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: title_q_c_1
+    reference: ''
+  e4d40d0e-8c38-45ef-97dd-15ebab0b4078: !Template
+    answer_choices: null
+    id: e4d40d0e-8c38-45ef-97dd-15ebab0b4078
+    jinja: "Give a brief description of the following text: \n\n{{ text }}\n|||\n\
+      {{ description }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: desc_q_c_1
+    reference: ''
+  f4a0b21c-fcf1-4e3d-aa59-7cf3b9ae8780: !Template
+    answer_choices: null
+    id: f4a0b21c-fcf1-4e3d-aa59-7cf3b9ae8780
+    jinja: "{{ text }} \n\nThe above text can be summarized as follows:\n|||\n{{ description\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: desc_c_q_3
+    reference: ''
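Every `jinja` field above follows the same promptsource convention: a single Jinja2 template whose rendered output is split on the `|||` separator into an (input, target) pair; promptsource performs this split when a template is applied. A simplified sketch with plain `jinja2` and a made-up cc_news record (only the field names `title`, `description`, and `text` come from the templates above):

```python
from jinja2 import Template

# Made-up record; field names match the cc_news templates above.
example = {
    "title": "Local team wins the final",
    "description": "The home side clinched the championship on Sunday.",
    "text": "In a tense final on Sunday, the home side scored twice late on ...",
}

jinja = "Give this text a title: \n\n{{ text }}\n|||\n{{ title }}"
rendered = Template(jinja).render(**example)

# The ||| separator divides the model input from the expected target.
prompt, target = (part.strip() for part in rendered.split("|||"))
print(prompt)
print(target)  # -> "Local team wins the final"
```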
diff --git a/promptsource/templates/circa/templates.yaml b/promptsource/templates/circa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..75a28755bb191aad118ff3f61c2ca5865a04d896
--- /dev/null
+++ b/promptsource/templates/circa/templates.yaml
@@ -0,0 +1,91 @@
+dataset: circa
+templates:
+  053260a8-1bcc-4805-81d2-bb528fc56ca2: !Template
+    answer_choices: null
+    id: 053260a8-1bcc-4805-81d2-bb528fc56ca2
+    jinja: 'Convert this question to a declarative sentence asserting an
+      affirmative answer:
+
+
+      {{question_X}} |||
+
+      {{canquestion_X}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - BLEU
+      - Edit Distance
+      - ROUGE
+      original_task: false
+    name: question_declarative
+    reference: ''
+  70b7a94a-6a39-4a81-9a6e-0709a0acdb28: !Template
+    answer_choices: "Yes ||| No ||| In the middle, neither yes nor no ||| Probably\
+      \ yes / sometimes yes ||| Probably no ||| Yes, subject to some conditions |||\
+      \ Other ||| I am not sure how X will interpret Y\u2019s answer"
+    id: 70b7a94a-6a39-4a81-9a6e-0709a0acdb28
+    jinja: "{% if goldstandard2 != -1 %}\n\nGiven the question-answer pair of X and\
+      \ Y in the context of {{context}}, which of the following answers is Y implying:\
+      \ \"{{\"Yes\"}}\", \"{{\"No\"}}\", \"{{\"In the middle, neither yes nor no\"\
+      }}\", \"{{\"Probably yes / sometimes yes\"}}\", \"{{\"Probably no\"}}\", \"\
+      {{\"Yes, subject to some conditions\"}}\", \"{{\"Other\"}}\" or \"{{\"I am not\
+      \ sure how X will interpret Y\u2019s answer\"}}\" ?\n\nX: {{question_X}} \n\n\
+      Y: {{answer_Y}} |||\n\n{{   answer_choices[goldstandard2]}}\n\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: goldstandard2_judgement
+    reference: ''
+  73466d0f-b1b1-4c61-8f03-346e121ae06c: !Template
+    answer_choices: null
+    id: 73466d0f-b1b1-4c61-8f03-346e121ae06c
+    jinja: 'What is a possible question X could ask Y given the context of {{context}}
+      that would cause Y to answer "{{answer_Y}}"? |||
+
+      {{question_X}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: possible_qn
+    reference: ''
+  997f7f96-d420-48c1-85f7-ecade54adbd7: !Template
+    answer_choices: "Yes ||| No ||| In the middle, neither yes nor no ||| Probably\
+      \ yes / sometimes yes ||| Probably no ||| Yes, subject to some conditions |||\
+      \ Other ||| I am not sure how X will interpret Y\u2019s answer"
+    id: 997f7f96-d420-48c1-85f7-ecade54adbd7
+    jinja: "{% if goldstandard1 != -1 %}\n\nGiven the question-answer pair of X and\
+      \ Y in the context of {{context}}, what answer is Y implying?\n\nX: {{question_X}}\
+      \ \n\nY: {{answer_Y}} |||\n\n{{   answer_choices[goldstandard1]}}\n\n{% endif\
+      \ %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: judgement
+    reference: ''
+  a15c1a30-5ef0-451f-b202-987a16752a0a: !Template
+    answer_choices: "Yes ||| No ||| In the middle, neither yes nor no ||| Probably\
+      \ yes / sometimes yes ||| Probably no ||| Yes, subject to some conditions |||\
+      \ Other ||| I am not sure how X will interpret Y\u2019s answer"
+    id: a15c1a30-5ef0-451f-b202-987a16752a0a
+    jinja: "{% if goldstandard1 != -1 %}\n\nGiven the question-answer pair of X and\
+      \ Y in the context of {{context}}, which of the following answers is Y implying:\
+      \ \"{{\"Yes\"}}\", \"{{\"No\"}}\", \"{{\"In the middle, neither yes nor no\"\
+      }}\", \"{{\"Probably yes / sometimes yes\"}}\", \"{{\"Probably no\"}}\", \"\
+      {{\"Yes, subject to some conditions\"}}\", \"{{\"Other\"}}\" or \"{{\"I am not\
+      \ sure how X will interpret Y\u2019s answer\"}}\" ?\n\nX: {{question_X}} \n\n\
+      Y: {{answer_Y}} |||\n\n{{   answer_choices[goldstandard1]}}\n\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: goldstandard1_judgement
+    reference: ''
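Two circa-specific conventions are worth spelling out: `answer_choices` is itself a `|||`-separated list that the templates index with the integer gold label, and the `{% if goldstandard1 != -1 %}` guard makes the template render an empty string for unannotated examples (label `-1`), which downstream code can filter out. A rough sketch of the lookup (the label value is invented):

```python
# The |||-separated answer_choices string, copied from the templates above.
raw = (
    "Yes ||| No ||| In the middle, neither yes nor no ||| "
    "Probably yes / sometimes yes ||| Probably no ||| "
    "Yes, subject to some conditions ||| Other ||| "
    "I am not sure how X will interpret Y's answer"
)
answer_choices = [c.strip() for c in raw.split("|||")]

goldstandard1 = 1  # invented label; -1 marks an unannotated example
if goldstandard1 != -1:  # same guard as {% if goldstandard1 != -1 %}
    print(answer_choices[goldstandard1])  # -> "No"
```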
diff --git a/promptsource/templates/climate_fever/templates.yaml b/promptsource/templates/climate_fever/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a17971108050092e202c37324ded2174e7166e4
--- /dev/null
+++ b/promptsource/templates/climate_fever/templates.yaml
@@ -0,0 +1,238 @@
+dataset: climate_fever
+templates:
+  38632cd9-7c4c-4e1d-85b3-20e7a78d4580: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: 38632cd9-7c4c-4e1d-85b3-20e7a78d4580
+    jinja: 'Here''s a statement and accompanying evidence. Does the evidence {{"supports"}},
+      {{"refutes"}}, or provide {{"not enough info"}} on climate change?
+
+
+      Statement: {{claim}}
+
+
+      Evidence: {{evidences[0]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[0]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: first_evidence_and_claim_itemization
+    reference: First evidence and claim with simple itemization
+  3970f474-a9e3-4264-aefa-dd4cfadd279c: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information ||| Disputed
+    id: 3970f474-a9e3-4264-aefa-dd4cfadd279c
+    jinja: 'Here''s a claim and accompanying evidence statements. Do the statements
+      {{"support"}}, {{"refute"}}, {{"dispute"}}, or provide {{"not enough info"}}
+      on climate change?
+
+
+      Claim: {{claim}}
+
+
+      Statements:
+
+      - {{ evidences | map(attribute="evidence") | map("trim", "\".")  | join(".\n-
+      ") }}.
+
+      |||
+
+      {{ answer_choices[claim_label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: claim_and_all_supporting_evidences
+    reference: A claim and all supporting evidences provided with the associated claim
+      label
+  5d5062c1-d28f-4b1c-a7da-9b53796ed39f: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: 5d5062c1-d28f-4b1c-a7da-9b53796ed39f
+    jinja: 'Here''s a statement and accompanying evidence. Does the evidence {{"supports"}},
+      {{"refutes"}}, or provide {{"not enough info"}} on climate change?
+
+
+      Statement: {{claim}}
+
+
+      Evidence: {{evidences[4]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[4]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: fifth_evidence_and_claim_itemization
+    reference: Fifth evidence and claim with simple itemization
+  82c484bd-2ed7-4ee0-aaee-2b31ac68e751: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: 82c484bd-2ed7-4ee0-aaee-2b31ac68e751
+    jinja: 'Considering the following claim:
+
+      {{claim}}.
+
+      Does the following statement {{"supports"}}, {{"refutes"}}, or provide {{"not
+      enough info"}} on climate change?
+
+      {{evidences[4]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[4]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: fifth_evidence_claim_pair
+    reference: Relation between the claim and fifth evidence pair.
+  9ba074a2-fbcf-4f69-bf03-bd16dbdec9cd: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: 9ba074a2-fbcf-4f69-bf03-bd16dbdec9cd
+    jinja: 'Here''s a statement and accompanying evidence. Does the evidence {{"supports"}},
+      {{"refutes"}}, or provide {{"not enough info"}} on climate change?
+
+
+      Statement: {{claim}}
+
+
+      Evidence: {{evidences[3]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[3]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: fourth_evidence_and_claim_itemization
+    reference: Fourth evidence and claim with simple itemization
+  9f68b883-d6a3-4e95-af2a-b7755bc46ba9: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: 9f68b883-d6a3-4e95-af2a-b7755bc46ba9
+    jinja: 'Here''s a statement and accompanying evidence. Does the evidence {{"supports"}},
+      {{"refutes"}}, or provide {{"not enough info"}} on climate change?
+
+
+      Statement: {{claim}}
+
+
+      Evidence: {{evidences[2]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[2]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: third_evidence_and_claim_itemization
+    reference: Third evidence and claim with simple itemization
+  cb78a363-fd32-4dbd-976f-b56de644ba90: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: cb78a363-fd32-4dbd-976f-b56de644ba90
+    jinja: 'Considering the following claim:
+
+      {{claim}}.
+
+      Does the following statement {{"supports"}}, {{"refutes"}}, or provide {{"not
+      enough info"}} on climate change?
+
+      {{evidences[1]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[1]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: second_evidence_claim_pair
+    reference: Relation between the claim and second evidence pair.
+  cca7b6f5-29e3-45a4-bc8b-889f5ab2fc13: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: cca7b6f5-29e3-45a4-bc8b-889f5ab2fc13
+    jinja: 'Considering the following claim:
+
+      {{claim}}.
+
+      Does the following statement {{"supports"}}, {{"refutes"}}, or provide {{"not
+      enough info"}} on climate change?
+
+      {{evidences[0]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[0]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: first_evidence_claim_pair
+    reference: Relation between the claim and first evidence pair.
+  dc3e0a0b-4f4d-4a76-9e7b-eafce4967e98: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: dc3e0a0b-4f4d-4a76-9e7b-eafce4967e98
+    jinja: 'Considering the following claim:
+
+      {{claim}}.
+
+      Does the following statement {{"supports"}}, {{"refutes"}}, or provide {{"not
+      enough info"}} on climate change?
+
+      {{evidences[3]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[3]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: fourth_evidence_claim_pair
+    reference: Relation between the claim and fourth evidence pair.
+  e3e01825-e256-4098-b7bb-aa07c399e8f6: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: e3e01825-e256-4098-b7bb-aa07c399e8f6
+    jinja: 'Here''s a statement and accompanying evidence. Does the evidence {{"supports"}},
+      {{"refutes"}}, or provide {{"not enough info"}} on climate change?
+
+
+      Statement: {{claim}}
+
+
+      Evidence: {{evidences[1]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[1]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: second_evidence_and_claim_itemization
+    reference: Second evidence and claim with simple itemization
+  ff9c9c11-92f1-4cb2-a73c-d786d58b00e1: !Template
+    answer_choices: Supports ||| Refutes ||| Not enough information
+    id: ff9c9c11-92f1-4cb2-a73c-d786d58b00e1
+    jinja: 'Considering the following claim:
+
+      {{claim}}.
+
+      Does the following statement {{"supports"}}, {{"refutes"}}, or provide {{"not
+      enough info"}} on climate change?
+
+      {{evidences[2]["evidence"].strip(".").strip(''"'')}}.
+
+      |||
+
+      {{ answer_choices[evidences[2]["evidence_label"]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: third_evidence_claim_pair
+    reference: Relation between the claim and third evidence pair.
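The climate_fever templates index into the `evidences` list (dicts with an `evidence` string and an integer `evidence_label`) and reuse the label as an index into the three answer choices; the `strip(".").strip('"')` chain normalizes trailing punctuation before the template re-adds a period. A sketch with an invented record:

```python
answer_choices = ["Supports", "Refutes", "Not enough information"]

# Invented record; the structure (list of dicts with "evidence" and
# "evidence_label") mirrors what the templates above index into.
evidences = [
    {"evidence": "Global sea levels rose about 20 cm over the last century.",
     "evidence_label": 0},
]

ev = evidences[0]
# Mirrors {{ evidences[0]["evidence"].strip(".").strip('"') }} + "."
statement = ev["evidence"].strip(".").strip('"')
print(f"Evidence: {statement}.")
print(answer_choices[ev["evidence_label"]])  # -> "Supports"
```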
diff --git a/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml b/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02a6d5d1999030e1420b126b9be9e4867d390cf3
--- /dev/null
+++ b/promptsource/templates/cnn_dailymail/3.0.0/templates.yaml
@@ -0,0 +1,156 @@
+dataset: cnn_dailymail
+subset: 3.0.0
+templates:
+  0556fd07-f7de-4daf-a0ae-4cda4fc239c8: !Template
+    answer_choices: null
+    id: 0556fd07-f7de-4daf-a0ae-4cda4fc239c8
+    jinja: 'Can you write an outline of the following article in a few points?
+
+
+      Article: {{article}}|||
+
+      {{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: write_an_outline
+    reference: ''
+  1c446bde-b3e6-4629-860f-9125681c72a1: !Template
+    answer_choices: null
+    id: 1c446bde-b3e6-4629-860f-9125681c72a1
+    jinja: 'Summarise the article:
+
+
+      {{article}} |||
+
+      {{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: news_summary
+    reference: ''
+  6e46894f-b5ff-4259-a691-63f1da8405da: !Template
+    answer_choices: null
+    id: 6e46894f-b5ff-4259-a691-63f1da8405da
+    jinja: 'In 2 or 3 sentences, what are the main points one should remember from
+      this news article?
+
+
+      Article: {{article}} |||
+
+      {{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: 2_or_3_sentences
+    reference: ''
+  892cbf90-2c19-4664-943e-a004a0c9a7fa: !Template
+    answer_choices: null
+    id: 892cbf90-2c19-4664-943e-a004a0c9a7fa
+    jinja: 'Could you please generate a TLDR (Too Long Didn''t Read) summary of the
+      following news article?
+
+
+      Article: {{article}} |||
+
+      {{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: tldr_summary
+    reference: ''
+  9b7c6abf-5110-4b31-8345-be6b2eeea580: !Template
+    answer_choices: null
+    id: 9b7c6abf-5110-4b31-8345-be6b2eeea580
+    jinja: 'Condense the article down to the essentials to present it in the form
+      of short cards in mobile news apps:
+
+
+      {{article}} |||
+
+      {{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: news_card_view
+    reference: ''
+  b4ff2f63-8539-4d9c-9858-42fa5f95ba56: !Template
+    answer_choices: null
+    id: b4ff2f63-8539-4d9c-9858-42fa5f95ba56
+    jinja: 'Generate a story from key plot points:
+
+
+      {{highlights}} |||
+
+      {{article}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_story
+    reference: ''
+  c20ac3c5-da85-408a-bba9-8b12ef2ae379: !Template
+    answer_choices: null
+    id: c20ac3c5-da85-408a-bba9-8b12ef2ae379
+    jinja: 'Sum up the following article in brief: {{article}}|||{{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: sum_in_brief
+    reference: ''
+  e047b4ec-abff-4b36-896a-83f5f1ea6759: !Template
+    answer_choices: null
+    id: e047b4ec-abff-4b36-896a-83f5f1ea6759
+    jinja: 'Extract key points from the article that the stock market could react
+      to:
+
+
+      {{article}} |||
+
+      {{highlights}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: news_stock
+    reference: ''
+  efa42de6-7a20-4e35-92fc-919a5eb0b77e: !Template
+    answer_choices: null
+    id: efa42de6-7a20-4e35-92fc-919a5eb0b77e
+    jinja: 'What details would you include in a storyline to make it more engaging
+      and informative?
+
+
+      {{highlights}} |||
+
+      {{article}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: spice_up_story
+    reference: ''
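These files are ordinary YAML except for the `!Template` / `!TemplateMetadata` tags, which promptsource resolves to its own classes on load. A self-contained sketch of the tag mechanism using stand-in classes (the real ones live in `promptsource/templates.py`; this only illustrates how PyYAML maps the tags):

```python
import yaml

class Template(yaml.YAMLObject):
    """Stand-in for promptsource's Template; binds the !Template tag."""
    yaml_tag = "!Template"
    yaml_loader = yaml.SafeLoader

class TemplateMetadata(yaml.YAMLObject):
    """Stand-in binding the !TemplateMetadata tag."""
    yaml_tag = "!TemplateMetadata"
    yaml_loader = yaml.SafeLoader

with open("promptsource/templates/cnn_dailymail/3.0.0/templates.yaml") as f:
    data = yaml.safe_load(f)

# Each entry is keyed by its UUID; attributes come from the YAML mapping.
for uuid, tpl in data["templates"].items():
    print(uuid, tpl.name, tpl.metadata.metrics)
```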
diff --git a/promptsource/templates/codah/codah/templates.yaml b/promptsource/templates/codah/codah/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..236ef8f9fa462f04a36fbbf2a1bd13c4b29411e4
--- /dev/null
+++ b/promptsource/templates/codah/codah/templates.yaml
@@ -0,0 +1,192 @@
+dataset: codah
+subset: codah
+templates:
+  008b421e-3ca1-495b-acf8-d88fe352da53: !Template
+    answer_choices: null
+    id: 008b421e-3ca1-495b-acf8-d88fe352da53
+    jinja: '{{question_propmt}}
+
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Which is the correct ending?
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: answer_with_option_post
+    reference: ''
+  01fd9142-114e-43ea-bdef-9ccc46135ebb: !Template
+    answer_choices: null
+    id: 01fd9142-114e-43ea-bdef-9ccc46135ebb
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: answer_with_option
+    reference: ''
+  10a5996d-4aa5-40a8-95d9-fff842c75653: !Template
+    answer_choices: null
+    id: 10a5996d-4aa5-40a8-95d9-fff842c75653
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: null
+    name: answer_no_option
+    reference: ''
+  30959166-08a2-4412-93e9-a865db651a09: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 30959166-08a2-4412-93e9-a865db651a09
+    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
+      \ '.join(answer_choices)}}.\n\nTo which category does the sentence belong?\
+      \ \n|||\n{{answer_choices[question_category]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category
+    reference: ''
+  36a9e72d-3bd5-4c81-bfd7-108706d1f8db: !Template
+    answer_choices: null
+    id: 36a9e72d-3bd5-4c81-bfd7-108706d1f8db
+    jinja: '{{question_propmt}}
+
+
+      Choose a candidate ending from this list:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: choose_from_list
+    reference: ''
+  99f0a5f0-2e5d-4e04-817c-8968be2cc760: !Template
+    answer_choices: null
+    id: 99f0a5f0-2e5d-4e04-817c-8968be2cc760
+    jinja: '{{question_propmt}}
+
+      {% for k in range(candidate_answers | length) %}
+
+      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"1, 2, 3 or 4"}} ?
+
+      |||
+
+      {{correct_answer_idx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: answer_with_option_idx
+    reference: ''
+  9e383a33-67e3-4a03-a4c5-50f986022a71: !Template
+    answer_choices: null
+    id: 9e383a33-67e3-4a03-a4c5-50f986022a71
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Finish the sentence with the correct answer
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: finish_from_the_list_post
+    reference: ''
+  b4784090-b540-4de5-b391-a9f130c101be: !Template
+    answer_choices: null
+    id: b4784090-b540-4de5-b391-a9f130c101be
+    jinja: 'Finish the following text:
+
+
+      {{question_propmt}}
+
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: null
+    name: finish_pre
+    reference: ''
+  c171ce3b-08c4-4056-af11-7bdb165fc75d: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: c171ce3b-08c4-4056-af11-7bdb165fc75d
+    jinja: '{{question_propmt}}
+
+
+      Which of  {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+      " or Others"])}} best describes the text?
+
+
+      |||
+
+      {{answer_choices[question_category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category_bis
+    reference: ''
+  cc338e7b-c13c-4c4d-af51-7151c24c001e: !Template
+    answer_choices: null
+    id: cc338e7b-c13c-4c4d-af51-7151c24c001e
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence using one of the following endings:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: finish_from_the_list
+    reference: ''
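One detail in the `answer_with_option_idx` templates deserves a note: the loop prints candidates numbered `k + 1` (1 through 4), while `correct_answer_idx` indexes `candidate_answers` 0-based everywhere else in the file, so the target is emitted as `correct_answer_idx + 1` to match the numbering shown in the prompt. A sketch of the same enumeration with an invented record (`question_propmt` is spelled verbatim as in the templates above):

```python
# Invented record; field names (including the spelling of question_propmt)
# are taken verbatim from the templates above.
example = {
    "question_propmt": "The man climbed into the boxing ring and",
    "candidate_answers": [
        "baked a cake.",
        "put on his gloves.",
        "swam a lap.",
        "read a newspaper.",
    ],
    "correct_answer_idx": 1,  # 0-based index into candidate_answers
}

lines = [example["question_propmt"]]
for k, answer in enumerate(example["candidate_answers"]):
    lines.append(f"{k + 1} -> {answer}")  # options displayed as 1..4
lines.append("Is the right answer 1, 2, 3 or 4?")
print("\n".join(lines))

print(example["correct_answer_idx"] + 1)  # -> 2, i.e. option 2
```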
diff --git a/promptsource/templates/codah/fold_0/templates.yaml b/promptsource/templates/codah/fold_0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..db2ac7c2fa3cf21e9bf6068e0055d02ad62653d8
--- /dev/null
+++ b/promptsource/templates/codah/fold_0/templates.yaml
@@ -0,0 +1,192 @@
+dataset: codah
+subset: fold_0
+templates:
+  1d9fa9d2-80d1-442c-986d-fb7598923d09: !Template
+    answer_choices: null
+    id: 1d9fa9d2-80d1-442c-986d-fb7598923d09
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Finish the sentence with the correct answer
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list_post
+    reference: ''
+  3b64d17a-225f-485b-b090-1a53fdeb1c90: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 3b64d17a-225f-485b-b090-1a53fdeb1c90
+    jinja: '{{question_propmt}}
+
+
+      Which of  {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+      " or Others"])}} best describes the text?
+
+
+      |||
+
+      {{answer_choices[question_category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category_bis
+    reference: ''
+  6b134736-8660-4457-a5de-f7dd0f1f148b: !Template
+    answer_choices: null
+    id: 6b134736-8660-4457-a5de-f7dd0f1f148b
+    jinja: '{{question_propmt}}
+
+      {% for k in range(candidate_answers | length) %}
+
+      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"1, 2, 3 or 4"}} ?
+
+      |||
+
+      {{correct_answer_idx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_idx
+    reference: ''
+  88ec7e5d-304e-4dbd-9aad-6f2a69ec6147: !Template
+    answer_choices: null
+    id: 88ec7e5d-304e-4dbd-9aad-6f2a69ec6147
+    jinja: '{{question_propmt}}
+
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Which is the correct ending?
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_post
+    reference: ''
+  9527a0b1-0267-443e-83c8-dae9e6aa263b: !Template
+    answer_choices: null
+    id: 9527a0b1-0267-443e-83c8-dae9e6aa263b
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option
+    reference: ''
+  bd7bcef8-72fd-4782-85e7-a02c5b90d4a6: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: bd7bcef8-72fd-4782-85e7-a02c5b90d4a6
+    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
+      \ '.join(answer_choices)}}.\n\nTo which category does the sentence belong?\
+      \ \n|||\n{{answer_choices[question_category]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category
+    reference: ''
+  c79ad64b-0a59-472e-aab4-804b01ddd3c1: !Template
+    answer_choices: null
+    id: c79ad64b-0a59-472e-aab4-804b01ddd3c1
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_no_option
+    reference: ''
+  c92cc7d8-07e1-48a4-abb8-d85d730fb935: !Template
+    answer_choices: null
+    id: c92cc7d8-07e1-48a4-abb8-d85d730fb935
+    jinja: 'Finish the following text:
+
+
+      {{question_propmt}}
+
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: finish_pre
+    reference: ''
+  dd274f66-6ec9-40e5-90b9-1b63cb3ead3e: !Template
+    answer_choices: null
+    id: dd274f66-6ec9-40e5-90b9-1b63cb3ead3e
+    jinja: '{{question_propmt}}
+
+
+      Choose a candidate ending from this list:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: choose_from_list
+    reference: ''
+  fd126b6a-c021-42a2-8a50-642f38dd1060: !Template
+    answer_choices: null
+    id: fd126b6a-c021-42a2-8a50-642f38dd1060
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence using one of the following endings:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list
+    reference: ''
diff --git a/promptsource/templates/codah/fold_1/templates.yaml b/promptsource/templates/codah/fold_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0a5ef4d18a0977bc267836a9b05ecf0afd017063
--- /dev/null
+++ b/promptsource/templates/codah/fold_1/templates.yaml
@@ -0,0 +1,192 @@
+dataset: codah
+subset: fold_1
+templates:
+  0a7ef357-b2c2-4c1c-a4b9-7e069780ae76: !Template
+    answer_choices: null
+    id: 0a7ef357-b2c2-4c1c-a4b9-7e069780ae76
+    jinja: 'Finish the following text:
+
+
+      {{question_propmt}}
+
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: finish_pre
+    reference: ''
+  0d54d52b-91dc-4e23-98a1-3b81bbd8558f: !Template
+    answer_choices: null
+    id: 0d54d52b-91dc-4e23-98a1-3b81bbd8558f
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option
+    reference: ''
+  2adfb019-690a-482c-aecc-2b43168a9a2a: !Template
+    answer_choices: null
+    id: 2adfb019-690a-482c-aecc-2b43168a9a2a
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Finish the sentence with the correct answer
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list_post
+    reference: ''
+  3a946e9d-7b34-47ed-b3b8-7894ded8839d: !Template
+    answer_choices: null
+    id: 3a946e9d-7b34-47ed-b3b8-7894ded8839d
+    jinja: '{{question_propmt}}
+
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Which is the correct ending?
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_post
+    reference: ''
+  47651e9b-c25c-49a4-b32a-c0029cdc2aa2: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 47651e9b-c25c-49a4-b32a-c0029cdc2aa2
+    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
+      \ '.join(answer_choices)}}.\n\nTo which category does the sentence belong?\
+      \ \n|||\n{{answer_choices[question_category]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category
+    reference: ''
+  757a5faf-a48c-41b6-b7bd-a512d5e6107b: !Template
+    answer_choices: null
+    id: 757a5faf-a48c-41b6-b7bd-a512d5e6107b
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_no_option
+    reference: ''
+  9f1a9858-9528-47ed-a5ee-16d18b48c0da: !Template
+    answer_choices: null
+    id: 9f1a9858-9528-47ed-a5ee-16d18b48c0da
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence using one of the following endings:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list
+    reference: ''
+  a5aad762-bc91-4b21-b869-323aaea78d20: !Template
+    answer_choices: null
+    id: a5aad762-bc91-4b21-b869-323aaea78d20
+    jinja: '{{question_propmt}}
+
+      {% for k in range(candidate_answers | length) %}
+
+      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"1, 2, 3 or 4"}} ?
+
+      |||
+
+      {{correct_answer_idx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_idx
+    reference: ''
+  c05e77b5-4a88-4b07-9a9a-baab17fe7e85: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: c05e77b5-4a88-4b07-9a9a-baab17fe7e85
+    jinja: '{{question_propmt}}
+
+
+      Which of  {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+      " or Others"])}} best describes the text?
+
+
+      |||
+
+      {{answer_choices[question_category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category_bis
+    reference: ''
+  c3df4771-59a6-41a1-8e84-cf7bae719394: !Template
+    answer_choices: null
+    id: c3df4771-59a6-41a1-8e84-cf7bae719394
+    jinja: '{{question_propmt}}
+
+
+      Choose a candidate ending from this list:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: choose_from_list
+    reference: ''
diff --git a/promptsource/templates/codah/fold_2/templates.yaml b/promptsource/templates/codah/fold_2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a6343d2e0a6a4099669afc79efd084abea63792f
--- /dev/null
+++ b/promptsource/templates/codah/fold_2/templates.yaml
@@ -0,0 +1,192 @@
+dataset: codah
+subset: fold_2
+templates:
+  0516d1f3-da5d-4e0f-b320-e20b79ac4bfc: !Template
+    answer_choices: null
+    id: 0516d1f3-da5d-4e0f-b320-e20b79ac4bfc
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option
+    reference: ''
+  1e14c67b-13ae-4bc7-9919-2d405c79cfc3: !Template
+    answer_choices: null
+    id: 1e14c67b-13ae-4bc7-9919-2d405c79cfc3
+    jinja: 'Finish the following text:
+
+
+      {{question_propmt}}
+
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: finish_pre
+    reference: ''
+  1fa5ab62-06da-4f18-9a0a-d9851224cde5: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 1fa5ab62-06da-4f18-9a0a-d9851224cde5
+    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
+      \ '.join(answer_choices)}}.\n\nTo which category does the sentence belong?\
+      \ \n|||\n{{answer_choices[question_category]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category
+    reference: ''
+  698936f4-cdb9-41d4-8feb-bbb934ea7197: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 698936f4-cdb9-41d4-8feb-bbb934ea7197
+    jinja: '{{question_propmt}}
+
+
+      Which of  {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+      " or Others"])}} best describes the text?
+
+
+      |||
+
+      {{answer_choices[question_category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category_bis
+    reference: ''
+  8dd5e484-9763-4f70-9990-e0c1a94d76b0: !Template
+    answer_choices: null
+    id: 8dd5e484-9763-4f70-9990-e0c1a94d76b0
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence using one of the following endings:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list
+    reference: ''
+  b9f90c9f-2714-4b9c-bf10-1b540ee38dfa: !Template
+    answer_choices: null
+    id: b9f90c9f-2714-4b9c-bf10-1b540ee38dfa
+    jinja: '{{question_propmt}}
+
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Which is the correct ending?
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_post
+    reference: ''
+  bfb69adf-326b-4366-9de6-d566ab75ae2c: !Template
+    answer_choices: null
+    id: bfb69adf-326b-4366-9de6-d566ab75ae2c
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_no_option
+    reference: ''
+  d174023b-4a4a-4163-8eab-1736af5ebce5: !Template
+    answer_choices: null
+    id: d174023b-4a4a-4163-8eab-1736af5ebce5
+    jinja: '{{question_propmt}}
+
+      {% for k in range(candidate_answers | length) %}
+
+      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"1, 2, 3 or 4"}} ?
+
+      |||
+
+      {{correct_answer_idx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_idx
+    reference: ''
+  e901db33-9607-4b65-b881-a36b2d0b2d87: !Template
+    answer_choices: null
+    id: e901db33-9607-4b65-b881-a36b2d0b2d87
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Finish the sentence with the correct answer
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list_post
+    reference: ''
+  e9e7ed0b-0a7f-438f-acd6-c0ff153ed9ce: !Template
+    answer_choices: null
+    id: e9e7ed0b-0a7f-438f-acd6-c0ff153ed9ce
+    jinja: '{{question_propmt}}
+
+
+      Choose a candidate ending from this list:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: choose_from_list
+    reference: ''
diff --git a/promptsource/templates/codah/fold_3/templates.yaml b/promptsource/templates/codah/fold_3/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..28c75251df14c77e8c370d3b54500292c2535ce4
--- /dev/null
+++ b/promptsource/templates/codah/fold_3/templates.yaml
@@ -0,0 +1,192 @@
+dataset: codah
+subset: fold_3
+templates:
+  15861c29-a2f1-4165-8849-83b88320fc3d: !Template
+    answer_choices: null
+    id: 15861c29-a2f1-4165-8849-83b88320fc3d
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Finish the sentence with the correct answer
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list_post
+    reference: ''
+  9ab1a3e6-6c03-4c37-9a85-d8128dc92545: !Template
+    answer_choices: null
+    id: 9ab1a3e6-6c03-4c37-9a85-d8128dc92545
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option
+    reference: ''
+  9efbda8e-19f8-47fb-907a-d19c660b0ab8: !Template
+    answer_choices: null
+    id: 9efbda8e-19f8-47fb-907a-d19c660b0ab8
+    jinja: 'Finish the following text:
+
+
+      {{question_propmt}}
+
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: finish_pre
+    reference: ''
+  9fecf40b-f96f-4124-80b0-038d5e58784c: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 9fecf40b-f96f-4124-80b0-038d5e58784c
+    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
+      \ '.join(answer_choices)}}.\n\nTo which category does the sentence belong?\
+      \ \n|||\n{{answer_choices[question_category]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category
+    reference: ''
+  a53e444c-da0d-4159-8488-35858b239d3d: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: a53e444c-da0d-4159-8488-35858b239d3d
+    jinja: '{{question_propmt}}
+
+
+      Which of  {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+      " or Others"])}} best describes the text?
+
+
+      |||
+
+      {{answer_choices[question_category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category_bis
+    reference: ''
+  ce98e4d9-7eca-4101-8299-fb074b52d279: !Template
+    answer_choices: null
+    id: ce98e4d9-7eca-4101-8299-fb074b52d279
+    jinja: '{{question_propmt}}
+
+      {% for k in range(candidate_answers | length) %}
+
+      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"1, 2, 3 or 4"}} ?
+
+      |||
+
+      {{correct_answer_idx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_idx
+    reference: ''
+  dd7a60fc-bec9-473b-b00d-f52c31c30b1c: !Template
+    answer_choices: null
+    id: dd7a60fc-bec9-473b-b00d-f52c31c30b1c
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_no_option
+    reference: ''
+  eb354069-d3ae-4707-b355-6a15b709e454: !Template
+    answer_choices: null
+    id: eb354069-d3ae-4707-b355-6a15b709e454
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence using one of the following endings:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list
+    reference: ''
+  f0fe6482-d937-42ee-bb71-72c8e8ffdf7e: !Template
+    answer_choices: null
+    id: f0fe6482-d937-42ee-bb71-72c8e8ffdf7e
+    jinja: '{{question_propmt}}
+
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Which is the correct ending?
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_post
+    reference: ''
+  fc1c9b44-e4ef-4ab1-adc2-496fe97ac01e: !Template
+    answer_choices: null
+    id: fc1c9b44-e4ef-4ab1-adc2-496fe97ac01e
+    jinja: '{{question_propmt}}
+
+
+      Choose a candidate ending from this list:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: choose_from_list
+    reference: ''
diff --git a/promptsource/templates/codah/fold_4/templates.yaml b/promptsource/templates/codah/fold_4/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..60c87cb9a980e129517708123db7152606ac6272
--- /dev/null
+++ b/promptsource/templates/codah/fold_4/templates.yaml
@@ -0,0 +1,192 @@
+dataset: codah
+subset: fold_4
+templates:
+  1511a72f-0975-44ab-90cd-4cc6c73d5442: !Template
+    answer_choices: null
+    id: 1511a72f-0975-44ab-90cd-4cc6c73d5442
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Finish the sentence with the correct answer
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list_post
+    reference: ''
+  1713e7dd-f4f6-453f-b849-11932955bc40: !Template
+    answer_choices: null
+    id: 1713e7dd-f4f6-453f-b849-11932955bc40
+    jinja: '{{question_propmt}}
+
+
+      Candidate answers:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option
+    reference: ''
+  1889f8c5-f868-4c7f-998a-699e2bbcb982: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: 1889f8c5-f868-4c7f-998a-699e2bbcb982
+    jinja: "{{question_propmt}}\n\nPossible categories for this sentence are {{',\
+      \ '.join(answer_choices)}}.\n\nTo which category does the sentence belong?\
+      \ \n|||\n{{answer_choices[question_category]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category
+    reference: ''
+  3b179264-27cb-4ad0-ba66-5d701d898f27: !Template
+    answer_choices: null
+    id: 3b179264-27cb-4ad0-ba66-5d701d898f27
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence using one of the following endings:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: finish_from_the_list
+    reference: ''
+  42d83925-b370-4044-9cdd-89ae648a748a: !Template
+    answer_choices: null
+    id: 42d83925-b370-4044-9cdd-89ae648a748a
+    jinja: 'Finish the following text:
+
+
+      {{question_propmt}}
+
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: finish_pre
+    reference: ''
+  997d06e6-1b35-49b9-9aea-7cb36e8b6dae: !Template
+    answer_choices: null
+    id: 997d06e6-1b35-49b9-9aea-7cb36e8b6dae
+    jinja: '{{question_propmt}}
+
+
+      - {{ candidate_answers | join("\n- ") }}
+
+
+      Which is the correct ending?
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_post
+    reference: ''
+  bdca0580-7a1b-41f9-87cb-9526e959582d: !Template
+    answer_choices: null
+    id: bdca0580-7a1b-41f9-87cb-9526e959582d
+    jinja: '{{question_propmt}}
+
+
+      Finish the sentence
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_no_option
+    reference: ''
+  c03043f7-4465-4af2-aa19-6d663d1c307e: !Template
+    answer_choices: null
+    id: c03043f7-4465-4af2-aa19-6d663d1c307e
+    jinja: '{{question_propmt}}
+
+
+      Choose a candidate ending from this list:
+
+      - {{ candidate_answers | join("\n- ") }}
+
+      |||
+
+      {{candidate_answers[correct_answer_idx]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: choose_from_list
+    reference: ''
+  e083c44f-039a-41c7-a1fa-74b8917e487e: !Template
+    answer_choices: Idioms ||| Reference ||| Polysemy ||| Negation ||| Quantitative
+      ||| Others
+    id: e083c44f-039a-41c7-a1fa-74b8917e487e
+    jinja: '{{question_propmt}}
+
+
+      Which of  {{'', ''.join(["Idioms", "Reference", "Polysemy", "Negation", "Quantitative",
+      " or Others"])}} best describes the text?
+
+
+      |||
+
+      {{answer_choices[question_category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question_category_bis
+    reference: ''
+  fa52cf98-4ffc-47f9-ba4a-96ae2eb95f42: !Template
+    answer_choices: null
+    id: fa52cf98-4ffc-47f9-ba4a-96ae2eb95f42
+    jinja: '{{question_propmt}}
+
+      {% for k in range(candidate_answers | length) %}
+
+      {{'' -> ''.join([(k + 1) | string, candidate_answers[k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"1, 2, 3 or 4"}}?
+
+      |||
+
+      {{correct_answer_idx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_with_option_idx
+    reference: ''
diff --git a/promptsource/templates/common_gen/templates.yaml b/promptsource/templates/common_gen/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7d8f8b042be2abd26e6eb755d951c3afa528e1b6
--- /dev/null
+++ b/promptsource/templates/common_gen/templates.yaml
@@ -0,0 +1,134 @@
+dataset: common_gen
+templates:
+  45778bd5-fddc-4c60-a77b-4eff5bed1c0b: !Template
+    answer_choices: null
+    id: 45778bd5-fddc-4c60-a77b-4eff5bed1c0b
+    jinja: "Ignoring the order of the concepts: {{ concepts | join(\", \") }}; \n\
+      Generate a sentence with all the concepts:\n|||\n{{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: Given concepts - type 2
+    reference: Ignoring the order of the concepts:...
+  684f1859-0b8d-4efe-82e1-7218838813cd: !Template
+    answer_choices: null
+    id: 684f1859-0b8d-4efe-82e1-7218838813cd
+    jinja: 'Put the concepts together to form a sentence: {{ concepts | join(", ")
+      }}.
+
+      |||
+
+      {{target}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: Put together
+    reference: This is similar to a task description
+  a4991cc7-cc91-4f37-af80-1983a02eb950: !Template
+    answer_choices: null
+    id: a4991cc7-cc91-4f37-af80-1983a02eb950
+    jinja: "Construct a sentence with the word {{ concepts | choice }}. \n\nHint:\
+      \ Use {{concepts | join(\", \")}} to restrict the output sentence.\n|||\n{{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: choice in concept centric sentence generation
+    reference: ''
+  b2033df4-7922-45b8-a113-e7784135cea9: !Template
+    answer_choices: null
+    id: b2033df4-7922-45b8-a113-e7784135cea9
+    jinja: "{% set seq = [ \n'From the concepts mentioned below, generate a sentence:',\
+      \ \n'Convert the concepts to a sentence:', \n'Given the list of concepts, write\
+      \ a sentence:'\n] %} \n{{ seq | choice }}\n{{ concepts | join(\", \") }}\n|||\n\
+      {{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: random task template prompt
+    reference: With a randomly chosen task template, generate a sentence from the
+      list of concepts
+  b7012213-04c4-424d-85fb-39d63d8a0ca2: !Template
+    answer_choices: null
+    id: b7012213-04c4-424d-85fb-39d63d8a0ca2
+    jinja: 'What are the topics in the sentence: {{target}}
+
+      |||
+
+      {{ concepts | join(", ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: topics from the sentence
+    reference: The template extracts the topics from the sentence
+  ed215962-8e51-45e7-b025-6e822f877098: !Template
+    answer_choices: null
+    id: ed215962-8e51-45e7-b025-6e822f877098
+    jinja: "We have the sentence: {{target}}; \nExtract all the key concepts: \n|||\n\
+      {{ concepts | join(\", \") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: sentence to concepts
+    reference: Template identifies the concepts from the sentence
+  f3fce748-89e0-4b88-94bb-72ddb9a04d10: !Template
+    answer_choices: null
+    id: f3fce748-89e0-4b88-94bb-72ddb9a04d10
+    jinja: 'Can you write a sentence about the topic {{concepts | choice}}?
+
+      |||
+
+      {{target}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: topic to sentence
+    reference: 'Choose a random topic and ask the model to write a sentence about
+      it '
+  fa787974-86dd-4f66-b2d7-6d3523ce00e1: !Template
+    answer_choices: null
+    id: fa787974-86dd-4f66-b2d7-6d3523ce00e1
+    jinja: "Humans can easily string together abstract concepts to form a coherent\
+      \ sentence. \nFor example, with the concepts {{ concepts | join(\", \") }},\
+      \ a simple sentence can be  \n|||\n{{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: Example prompt
+    reference: The prompt is in the form of an example
+  fbeebb4f-cba1-4bc4-80ec-758a3c134033: !Template
+    answer_choices: null
+    id: fbeebb4f-cba1-4bc4-80ec-758a3c134033
+    jinja: "Given the list of concepts: {{ concepts | join(\", \") }}; \nGenerate\
+      \ a sentence with all the concepts:\n|||\n{{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: Given concepts type 1
+    reference: 'The prompt has the prefix "Given the .." '
diff --git a/promptsource/templates/commonsense_qa/templates.yaml b/promptsource/templates/commonsense_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ad34e9b488e145b293c139249914ea7ea1082550
--- /dev/null
+++ b/promptsource/templates/commonsense_qa/templates.yaml
@@ -0,0 +1,159 @@
+dataset: commonsense_qa
+templates:
+  1e1d0ce1-b0ea-4ad8-9971-b2b44948123b: !Template
+    answer_choices: null
+    id: 1e1d0ce1-b0ea-4ad8-9971-b2b44948123b
+    jinja: '{% if answerKey != "" %}
+
+      {{question}} |||
+
+      {{ choices[''text''][choices[''label''].index(answerKey)] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: not_multiple_choice
+    reference: ''
+  41188da5-c16a-4c6b-89af-6ce6815aedc6: !Template
+    answer_choices: null
+    id: 41188da5-c16a-4c6b-89af-6ce6815aedc6
+    jinja: '{% if answerKey != "" %}
+
+      {{question}}
+
+
+      {% for i in range(choices[''text'']|length) %}
+
+      - {{choices[''text''][i]}}
+
+      {% endfor %} |||
+
+      {{ choices[''text''][choices[''label''].index(answerKey)] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: qa
+    reference: ''
+  42fca80b-b614-4288-aad2-2525360543cb: !Template
+    answer_choices: null
+    id: 42fca80b-b614-4288-aad2-2525360543cb
+    jinja: '{% if answerKey != "" %}
+
+      Pick the most suitable answer for the following question:
+
+
+      {{question}}
+
+
+      Options:
+
+
+      {% for i in range(choices[''text'']|length) %}
+
+      {{choices[''label''][i]}}: {{choices[''text''][i]}}
+
+      {% endfor %} |||
+
+      {{ answerKey }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: test_question
+    reference: ''
+  4e46612a-a320-4692-bddc-37df45680df4: !Template
+    answer_choices: null
+    id: 4e46612a-a320-4692-bddc-37df45680df4
+    jinja: '{% if answerKey != "" %}
+
+      Pick all the incorrect/false options for the following question:
+
+
+      {{question}}
+
+
+      Options:
+
+
+      {% for i in range(choices[''text'']|length) %}
+
+      - {{choices[''text''][i]}}
+
+      {% endfor %} |||
+
+      {% for i in range(choices[''text'']|length) %}
+
+      {% if i != choices[''label''].index(answerKey) %}
+
+      - {{ choices[''text''][i] }}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: test_question_false
+    reference: ''
+  8e3f63fa-9ae6-4105-bd51-874b5e1d6b8e: !Template
+    answer_choices: null
+    id: 8e3f63fa-9ae6-4105-bd51-874b5e1d6b8e
+    jinja: '{% if answerKey != "" %}
+
+      Pick the most suitable answer for the following question:
+
+
+      {{question}}
+
+
+      Options:
+
+
+      {% for i in range(choices[''text'']|length) %}
+
+      - {{choices[''text''][i]}}
+
+      {% endfor %} |||
+
+      {{ choices[''text''][choices[''label''].index(answerKey)] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: most_suitable
+    reference: ''
+  bc718994-1d3e-4ae4-b65b-be307154b0a6: !Template
+    answer_choices: null
+    id: bc718994-1d3e-4ae4-b65b-be307154b0a6
+    jinja: '{% if answerKey != "" %}
+
+      Use the following options to predict a possible question for them:
+
+
+      {% for i in range(choices[''text'']|length) %}
+
+      - {{choices[''text''][i]}}
+
+      {% endfor %} |||
+
+      {{question}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_qn
+    reference: ''
diff --git a/promptsource/templates/conv_ai/templates.yaml b/promptsource/templates/conv_ai/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c03aba39790d8d759d3658be9b4510b17dfcbecf
--- /dev/null
+++ b/promptsource/templates/conv_ai/templates.yaml
@@ -0,0 +1,206 @@
+dataset: conv_ai
+templates:
+  1664cdd9-54e8-4679-821b-8013e9df197e: !Template
+    answer_choices: null
+    id: 1664cdd9-54e8-4679-821b-8013e9df197e
+    jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Alice") | list |
+      length) %} {{context}}
+
+
+      {% for utterance in thread %}
+
+      - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+      {% endfor %}
+
+
+      Do Alice''s utterances in the previous conversation show that she was not
+      interested in the context, yes or no?
+
+      |||
+
+      {% for eval in evaluation %}
+
+      {% if "Alice" == eval["userId"] %}
+
+      {% if 3 > eval["engagement"] %}yes{% else %}no{% endif %}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: engagement_alice_1
+    reference: ''
+  2d3ca9f5-60f0-459d-932f-ab30e1e79b22: !Template
+    answer_choices: null
+    id: 2d3ca9f5-60f0-459d-932f-ab30e1e79b22
+    jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Bob") | list | length)
+      %} "{{context}}"
+
+
+      Would you say Bob''s engagement is real, given the previous context, in this
+      conversation:
+
+      {% for utterance in thread %}
+
+      - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+      {% endfor %}
+
+      |||
+
+      {% for eval in evaluation %}
+
+      {% if "Bob" == eval["userId"] %}
+
+      {% if 3 < eval["engagement"] %}yes{% else %}no{% endif %}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: engagement_bob_1
+    reference: ''
+  4390549a-3bdf-43ad-9e69-6bc380f33f01: !Template
+    answer_choices: null
+    id: 4390549a-3bdf-43ad-9e69-6bc380f33f01
+    jinja: '{% set alice = (evaluation|selectattr("userId", "equalto", "Alice")|first)["engagement"]
+      %} {% set bob = (evaluation|selectattr("userId", "equalto", "Bob")|first)["engagement"]
+      %} {% if (0 < (thread | selectattr("userId", "equalto", "Bob") | list | length))
+      and (0 < (thread | selectattr("userId", "equalto", "Alice") | list | length))
+      %} {{context}}
+
+
+      Given the previous context, who do you think is more engaged in this conversation
+      (Alice, Bob, or both):
+
+      {% for utterance in thread %}
+
+      - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+      {% endfor %}
+
+      |||
+
+      {% if alice == bob %}both{% elif alice < bob %}Bob{% else %}Alice{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: engagement_0
+    reference: ''
+  955dd9f5-0fe5-46c3-a217-995bd876f26f: !Template
+    answer_choices: null
+    id: 955dd9f5-0fe5-46c3-a217-995bd876f26f
+    jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Bob") | list | length)
+      %}{{context}}
+
+
+      Given the preceding context, would you say Bob''s engagement is real, based
+      on his utterances in the conversation:
+
+      {% for utterance in thread %}
+
+      {% if "Bob" == utterance["userId"] %}
+
+      - "{{ utterance["text"] }}",
+
+      {% endif %}{% endfor %}
+
+      |||
+
+      {% for eval in evaluation %}
+
+      {% if "Bob" == eval["userId"] %}
+
+      {% if 3 < eval["engagement"] %}yes{% else %}no{% endif %}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: engagement_bob_0
+    reference: ''
+  c0840f89-e444-49ab-bab0-08f71ec89093: !Template
+    answer_choices: null
+    id: c0840f89-e444-49ab-bab0-08f71ec89093
+    jinja: '{% if 0 < (thread | selectattr("userId", "equalto", "Alice") | list |
+      length) %} context:
+
+
+      {{context}}
+
+
+      conversation:
+
+      {% for utterance in thread %}
+
+      - {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+      {% endfor %}
+
+      Was Alice really into this conversation?|||
+
+      {% for eval in evaluation %}
+
+      {% if "Alice" == eval["userId"] %}
+
+      {% if 3 < eval["engagement"] %}yes{% else %}no{% endif %}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: engagement_alice_0
+    reference: ''
+  db94d56f-3fc8-4b6a-b3d3-2ac37e8110ff: !Template
+    answer_choices: null
+    id: db94d56f-3fc8-4b6a-b3d3-2ac37e8110ff
+    jinja: '{% set alice = (evaluation|selectattr("userId", "equalto", "Alice")|first)["engagement"]
+      %} {% set bob = (evaluation|selectattr("userId", "equalto", "Bob")|first)["engagement"]
+      %} {% if (0 < (thread | selectattr("userId", "equalto", "Bob") | list | length))
+      and (0 < (thread | selectattr("userId", "equalto", "Alice") | list | length))
+      %} {{context}}
+
+
+      Who would you say is more engaged here: Alice or Bob (or both), given the
+      previous context?
+
+      {% for utterance in thread %}
+
+      {{ utterance["userId"] }}: {{ utterance["text"] }}
+
+      {% endfor %}
+
+      |||
+
+      {% if alice == bob %}both{% elif alice < bob %}Bob{% else %}Alice{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: engagement_1
+    reference: ''
diff --git a/promptsource/templates/conv_ai_2/templates.yaml b/promptsource/templates/conv_ai_2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85515e75d170f904a1f04a68b02b730978548b56
--- /dev/null
+++ b/promptsource/templates/conv_ai_2/templates.yaml
@@ -0,0 +1,182 @@
+dataset: conv_ai_2
+templates:
+  04f7a3d8-c40f-45d1-b9ae-5bd23ff11628: !Template
+    answer_choices: null
+    id: 04f7a3d8-c40f-45d1-b9ae-5bd23ff11628
+    jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+      "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+      "equalto", "Human") | list | length)) %}
+
+      Would a person describing themselves with the following statements:
+
+      {% for bp in user_profile %}
+
+      - "{{ bp | join('''') }}",
+
+      {% endfor %}
+
+      say things like:
+
+      {% for utterance in dialog %}
+
+      {% if class == utterance["sender_class"] %}
+
+      - "{{ utterance["text"] }}",
+
+      {% endif %}{% endfor %}
+
+      in a conversation?|||
+
+      {% if "Human" == class %}yes{% else %}no{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: match_profile_1
+    reference: ''
+  2dfa7a0c-46d5-4842-be2f-ae62fa80d581: !Template
+    answer_choices: null
+    id: 2dfa7a0c-46d5-4842-be2f-ae62fa80d581
+    jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+      "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+      "equalto", "Human") | list | length)) %}
+
+      I wonder if somebody describing herself, or himself, using these statements:
+
+      {% for bp in user_profile %}
+
+      - "{{ bp | join('''') }}",
+
+      {% endfor %}
+
+      could utter things like:
+
+      {% for utterance in dialog %}
+
+      {% if class == utterance["sender_class"] %}
+
+      - "{{ utterance["text"] }}",
+
+      {% endif %}{% endfor %}
+
+      in a conversation...
+
+      What''s your guess: yes or no?|||
+
+      {% if "Human" == class %}yes{% else %}no{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: match_profile_2
+    reference: ''
+  6ff0a746-5cf0-4a73-9dd9-8e08ddf0768a: !Template
+    answer_choices: null
+    id: 6ff0a746-5cf0-4a73-9dd9-8e08ddf0768a
+    jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+      "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+      "equalto", "Human") | list | length)) %}
+
+      Somebody using the following self-describing statements:
+
+      {% for bp in user_profile %}
+
+      - {{ bp | join('''') }}
+
+      {% endfor %}
+
+      might possibly say things like:
+
+      {% for utterance in dialog %}
+
+      {% if class == utterance["sender_class"] %}
+
+      - {{ utterance["text"] }}
+
+      {% endif %}{% endfor %}
+
+      Do you agree?|||
+
+      {% if "Human" == class %}yes{% else %}no{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: match_profile_4
+    reference: ''
+  cb296bf2-5189-48af-9517-a1f802509eb1: !Template
+    answer_choices: null
+    id: cb296bf2-5189-48af-9517-a1f802509eb1
+    jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+      "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+      "equalto", "Human") | list | length)) %}
+
+      {% for bp in user_profile %}
+
+      - "{{ bp | join('''') }}"
+
+      {% endfor %}
+
+      This profile matches a person saying things like:
+
+      {% for utterance in dialog %}
+
+      {% if class == utterance["sender_class"] %}
+
+      - "{{ utterance["text"] }}"
+
+      {% endif %}{% endfor %}
+
+      yes or no?|||
+
+      {% if "Human" == class %}yes{% else %}no{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: match_profile_3
+    reference: ''
+  eea6d2da-1c4c-460c-a1f9-f64cfd8c21c7: !Template
+    answer_choices: null
+    id: eea6d2da-1c4c-460c-a1f9-f64cfd8c21c7
+    jinja: '{% set class = ["Human", "Bot"] | random %} {% if (0 < (dialog | selectattr("sender_class",
+      "equalto", "Bot") | list | length)) and (0 < (dialog | selectattr("sender_class",
+      "equalto", "Human") | list | length)) %}
+
+      Given the following profile:
+
+      {% for bp in user_profile %}
+
+      - "{{ bp | join('''') }}"
+
+      {% endfor %}
+
+      would these utterances be expected:
+
+      {% for utterance in dialog %}
+
+      {% if class == utterance["sender_class"] %}
+
+      - "{{ utterance["text"] }}"
+
+      {% endif %}{% endfor %}
+
+      from a person in a conversation?|||
+
+      {% if "Human" == class %}yes{% else %}no{% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: match_profile_0
+    reference: ''
diff --git a/promptsource/templates/conv_ai_3/templates.yaml b/promptsource/templates/conv_ai_3/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1eaee871fa99591fe25bfc3c4fa74a9136835f13
--- /dev/null
+++ b/promptsource/templates/conv_ai_3/templates.yaml
@@ -0,0 +1,70 @@
+dataset: conv_ai_3
+templates:
+  04de512a-b097-474e-b952-3f47548ae557: !Template
+    answer_choices: null
+    id: 04de512a-b097-474e-b952-3f47548ae557
+    jinja: Given the request "{{initial_request}}", would a conversational system
+      likely need clarification to answer it?|||{% if 1 < clarification_need%}yes{%
+      else %}no{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: clarification_needed_0
+    reference: ''
+  2b94810c-515d-455f-a7d0-d1465d5f4f9d: !Template
+    answer_choices: null
+    id: 2b94810c-515d-455f-a7d0-d1465d5f4f9d
+    jinja: Could a conversational system somehow handle the request "{{initial_request}}"
+      without clarification?|||{% if 4 == clarification_need%}no{% else %}yes{% endif
+      %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: absolutely_ambiguous_1
+    reference: ''
+  5c302d76-b34c-44e2-9f56-96901758060a: !Template
+    answer_choices: null
+    id: 5c302d76-b34c-44e2-9f56-96901758060a
+    jinja: Would the request "{{initial_request}}" be absolutely ambiguous for a conversational
+      system?|||{% if 4 == clarification_need%}yes{% else %}no{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: absolutely_ambiguous_0
+    reference: ''
+  691d46e5-f1b0-4c7b-90b9-6da9711fd054: !Template
+    answer_choices: null
+    id: 691d46e5-f1b0-4c7b-90b9-6da9711fd054
+    jinja: Is the request "{{initial_request}}" to a conversational agent self-contained?|||{%
+      if 1 == clarification_need%}yes{% else %}no{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: self_contained_0
+    reference: ''
+  a1bad8cc-ee02-465d-a51d-a2b79a75075e: !Template
+    answer_choices: null
+    id: a1bad8cc-ee02-465d-a51d-a2b79a75075e
+    jinja: 'Would a conversational system be able to directly answer this request: "{{initial_request}}"?|||{%
+      if 1 == clarification_need%}yes{% else %}no{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: self_contained_1
+    reference: ''
+  d5479a4d-a57d-4005-995e-ec10bff02123: !Template
+    answer_choices: null
+    id: d5479a4d-a57d-4005-995e-ec10bff02123
+    jinja: Would a conversational agent likely need clarification to answer "{{initial_request}}"?|||{%
+      if 1 < clarification_need%}yes{% else %}no{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: clarification_needed_1
+    reference: ''
diff --git a/promptsource/templates/coqa/templates.yaml b/promptsource/templates/coqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d46dc7610146122ca6ac5c34b993838001090194
--- /dev/null
+++ b/promptsource/templates/coqa/templates.yaml
@@ -0,0 +1,107 @@
+dataset: coqa
+templates:
+  530616d3-1cc8-4faa-a855-4b21e0da9ec9: !Template
+    answer_choices: null
+    id: 530616d3-1cc8-4faa-a855-4b21e0da9ec9
+    jinja: "Answer the question based on the information contained in the passage.\n\
+      Q: {{questions[0]}} \n\nPassage: {{story}}\n\nA: ||| {{answers[\"input_text\"\
+      ][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: false
+    name: first_qa_turn
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  7c0e2256-961c-48e9-bc08-6c270cc68b4a: !Template
+    answer_choices: null
+    id: 7c0e2256-961c-48e9-bc08-6c270cc68b4a
+    jinja: 'Answer the last question based on the hint.
+
+      {% for question, answer in zip(questions[:-1], answers["input_text"][:-1]) %}
+
+      Q: {{question}}
+
+
+      A: {{answer}}
+
+      {%endfor %}
+
+
+      Q: {{questions[-1]}}
+
+
+      Hint: {{story}}
+
+
+      A:|||
+
+      {{answers["input_text"][-1]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: last_qa_turn
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
+  a22e8bf1-f5af-43eb-b38c-002462261da2: !Template
+    answer_choices: null
+    id: a22e8bf1-f5af-43eb-b38c-002462261da2
+    jinja: 'Can you form a set of {{questions | length}} question-answer pairs about
+      the passage below?
+
+
+      Passage: {{story}}|||
+
+      {% for question, answer in zip(questions, answers["input_text"]) %}
+
+      Q: {{question}}
+
+
+      A: {{answer}}
+
+
+      {% endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_dialogue
+    reference: ''
+  cb53f12f-e781-4a92-bbb0-fbef19bd2d29: !Template
+    answer_choices: null
+    id: cb53f12f-e781-4a92-bbb0-fbef19bd2d29
+    jinja: 'In the passage below, extract the part which answers the question:
+
+
+      Q: {{questions[0]}}
+
+      Passage: {{story}} |||
+
+      {{story[answers["answer_start"][0] : answers["answer_end"][0] ]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: extract_answer_first_qa_turn
+    reference: ''
+  dc05f09a-0b2b-4448-9226-45dcc4cf52e6: !Template
+    answer_choices: null
+    id: dc05f09a-0b2b-4448-9226-45dcc4cf52e6
+    jinja: "{% set missing_idx = range(questions|length)|choice %}\n\n{% for i in\
+      \ range(questions|length) %}\nQ: {{questions[i]}}\n\nA: {% if i != missing_idx\
+      \ %}\n{{answers[\"input_text\"][i]}}\n{%endif%}\n{%endfor%}\n\nGiven the above\
+      \ conversation, give a suitable response for the missing answer\n\nHint: {{story}}\n\
+      ||| \n{{answers[\"input_text\"][missing_idx]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: missing_answer
+    reference: 'Metric: variant of SQuAD (Section 6.1 of the paper)'
diff --git a/promptsource/templates/cord19/metadata/templates.yaml b/promptsource/templates/cord19/metadata/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e140a5cb2510d2a864123eeb0107f8fdf68cc52f
--- /dev/null
+++ b/promptsource/templates/cord19/metadata/templates.yaml
@@ -0,0 +1,58 @@
+dataset: cord19
+subset: metadata
+templates:
+  10d78ae0-635d-4cf3-8e24-61c879fd6ae7: !Template
+    answer_choices: null
+    id: 10d78ae0-635d-4cf3-8e24-61c879fd6ae7
+    jinja: "Write a scientific title for the following abstract: {{abstract}} |||\n\
+      Title: \n{{ title }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title generation 1
+    reference: ''
+  1821279d-37a7-42f0-ab0c-2a5589a2a7c3: !Template
+    answer_choices: null
+    id: 1821279d-37a7-42f0-ab0c-2a5589a2a7c3
+    jinja: "Title: {{title}}\nGenerate a plausible scientific abstract for a scientific\
+      \ paper on Covid-19 with the previous title |||\n {{ abstract }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: abstract generation 1
+    reference: ''
+  21fc3c51-5168-4abb-b969-81a115f2f568: !Template
+    answer_choices: null
+    id: 21fc3c51-5168-4abb-b969-81a115f2f568
+    jinja: "Write a scientific abstract for a paper on Covid-19 with the following\
+      \ title: {{title}}|||\nAbstract: \n{{ abstract }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: abstract generation 2
+    reference: ''
+  6a2ebf64-9db7-41f0-85a5-379270c54fa6: !Template
+    answer_choices: null
+    id: 6a2ebf64-9db7-41f0-85a5-379270c54fa6
+    jinja: "Absract: \n{{abstract}}.\nWhat could a scientific title be for this abstract\
+      \ on Covid-19? |||\nTitle: \n{{ title }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title generation 2
+    reference: ''
+  c895c4f1-d5e1-4a07-9ae9-0268c218e526: !Template
+    answer_choices: null
+    id: c895c4f1-d5e1-4a07-9ae9-0268c218e526
+    jinja: "Write a scientific abstract for a research paper on Coronavirus disease\
+      \ with the following title: {{title}}|||\nAbstract: \n{{ abstract }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: abstract generation 3
+    reference: ''
diff --git a/promptsource/templates/cos_e/v1.0/templates.yaml b/promptsource/templates/cos_e/v1.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..776190b56d998ab12c3b5f510e8e526530094dde
--- /dev/null
+++ b/promptsource/templates/cos_e/v1.0/templates.yaml
@@ -0,0 +1,224 @@
+dataset: cos_e
+subset: v1.0
+templates:
+  1040d9f9-4ba6-44a5-9d44-aa907ef35d49: !Template
+    answer_choices: '{{ choices | join("|||") }}'
+    id: 1040d9f9-4ba6-44a5-9d44-aa907ef35d49
+    jinja: '{{ question }}
+
+      Choose the most suitable option to answer the above question.
+
+      Options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: question_description_option_text
+    reference: ''
+  60e81a2b-8441-41c9-a904-46746216b621: !Template
+    answer_choices: A ||| B ||| C
+    id: 60e81a2b-8441-41c9-a904-46746216b621
+    jinja: "{{ question }}\nChoose the most suitable option to answer the above question.\n\
+      Options:\n{% for k in range(choices | length) %}\n{{'. '.join([answer_choices[k],\
+      \ choices[k]])}}\n{% endfor %}\n|||\n{{ answer_choices[choices.index(answer)]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: question_description_option_id
+    reference: ''
+  836b1643-b0c7-4c21-b33f-1a0aacae6562: !Template
+    answer_choices: '{{ choices | join("|||") }}'
+    id: 836b1643-b0c7-4c21-b33f-1a0aacae6562
+    jinja: '{{ question }}
+
+      - {{ answer_choices | join("\n- ") }}
+
+
+      The best answer is
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: question_option_description_text
+    reference: ''
+  97ffc573-3aaf-46b5-873f-cd1081c87ea2: !Template
+    answer_choices: null
+    id: 97ffc573-3aaf-46b5-873f-cd1081c87ea2
+    jinja: 'Question: {{ question }}
+
+      Options:
+
+      - {{ choices | join("\n- ") }}
+
+
+      The answer is "{{ answer }}" because
+
+      |||
+
+      {{ abstractive_explanation }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: generate_explanation_given_text
+    reference: ''
+  9ad6c3c2-883f-474f-98e1-7afc7744485c: !Template
+    answer_choices: null
+    id: 9ad6c3c2-883f-474f-98e1-7afc7744485c
+    jinja: 'Here''s a question: {{ question }}
+
+
+      Here are possible answers to this question:
+
+      - {{ choices | join("\n- ") }}
+
+
+      I believe the correct choice is "{{answer}}", here''s why:
+
+      |||
+
+      {{ abstractive_explanation }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: i_think
+    reference: ''
+  b46fcaba-9076-49b6-ab5a-ebdbd5a098b4: !Template
+    answer_choices: null
+    id: b46fcaba-9076-49b6-ab5a-ebdbd5a098b4
+    jinja: "Question: {{question}}\n\nChoices: \n- {{ choices | join(\"\\n- \") }}\n\
+      \nThe rationale to choose \"{{answer}}\" as the answer is that: |||\n{{abstractive_explanation}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: rationale
+    reference: ''
+  bf17f5c6-65e9-4449-ba49-f5fde0041d08: !Template
+    answer_choices: A ||| B ||| C
+    id: bf17f5c6-65e9-4449-ba49-f5fde0041d08
+    jinja: '{{ question }}
+
+      {% for k in range(choices | length) %}
+
+      {{''. ''.join([answer_choices[k], choices[k]])}}
+
+      {% endfor %}
+
+      The best answer is
+
+      |||
+
+      {{ answer_choices[choices.index(answer)] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: question_option_description_id
+    reference: ''
+  e57e45eb-9d02-4e15-9a95-ba4ef68245c1: !Template
+    answer_choices: '{{ choices | join("|||") }}'
+    id: e57e45eb-9d02-4e15-9a95-ba4ef68245c1
+    jinja: 'Pick the option in line with common sense to answer the question.
+
+      Question: {{ question }}
+
+      Options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: description_question_option_text
+    reference: ''
+  ee4a3703-db30-4fb5-9cb5-29a15be03fbf: !Template
+    answer_choices: A ||| B ||| C
+    id: ee4a3703-db30-4fb5-9cb5-29a15be03fbf
+    jinja: 'Pick the option in line with common sense to answer the question.
+
+      Question: {{ question }}
+
+      Options:
+
+      {% for k in range(choices | length) %}
+
+      {{''. ''.join([answer_choices[k], choices[k]])}}
+
+      {% endfor %}
+
+      |||
+
+      {{ answer_choices[choices.index(answer)] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: description_question_option_id
+    reference: ''
+  ef98a220-34e2-46cd-8466-fe817af8ec44: !Template
+    answer_choices: null
+    id: ef98a220-34e2-46cd-8466-fe817af8ec44
+    jinja: 'Question: {{ question }}
+
+      Options:
+
+      - {{ choices | join("\n- ") }}
+
+
+      Explain why a human would choose "{{answer}}" to answer the question above:
+
+      |||
+
+      {{ abstractive_explanation }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: explain_why_human
+    reference: ''
+  fc3474c4-63ec-4a94-87cf-0e3044b29282: !Template
+    answer_choices: null
+    id: fc3474c4-63ec-4a94-87cf-0e3044b29282
+    jinja: "Here's a question and a few possible answers: \n\nQ: {{ question }}\n\
+      Possible A: {{ choices | join(\", \") }}\n\nWhy is \"{{answer}}\" an answer\
+      \ aligned with human common sense? \n|||\n{{ abstractive_explanation }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: aligned_with_common_sense
+    reference: ''
diff --git a/promptsource/templates/cos_e/v1.11/templates.yaml b/promptsource/templates/cos_e/v1.11/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d0a15dfbe5783c3060e8f57789040ea8e8708ee6
--- /dev/null
+++ b/promptsource/templates/cos_e/v1.11/templates.yaml
@@ -0,0 +1,224 @@
+dataset: cos_e
+subset: v1.11
+templates:
+  02a87cd3-6595-44bd-a384-95bdc8b3dd0c: !Template
+    answer_choices: '{{ choices | join("|||") }}'
+    id: 02a87cd3-6595-44bd-a384-95bdc8b3dd0c
+    jinja: '{{ question }}
+
+      Choose the most suitable option to answer the above question.
+
+      Options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question_description_option_text
+    reference: ''
+  046ce4df-c847-4dc2-b53c-9f02d32aff8a: !Template
+    answer_choices: A ||| B ||| C ||| D ||| E
+    id: 046ce4df-c847-4dc2-b53c-9f02d32aff8a
+    jinja: "{{ question }}\nChoose the most suitable option to answer the above question.\n\
+      Options:\n{% for k in range(choices | length) %}\n{{'. '.join([answer_choices[k],\
+      \ choices[k]])}}\n{% endfor %}\n|||\n{{ answer_choices[choices.index(answer)]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question_description_option_id
+    reference: ''
+  25863d16-34be-4c5f-9040-11d5c6398b4b: !Template
+    answer_choices: null
+    id: 25863d16-34be-4c5f-9040-11d5c6398b4b
+    jinja: "Question: {{question}}\n\nChoices: \n- {{ choices | join(\"\\n- \") }}\n\
+      \nThe rationale to choose \"{{answer}}\" as the answer is that: |||\n{{abstractive_explanation}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: rationale
+    reference: ''
+  4b946a87-b39c-4f01-9041-832d82da48af: !Template
+    answer_choices: '{{ choices | join("|||") }}'
+    id: 4b946a87-b39c-4f01-9041-832d82da48af
+    jinja: '{{ question }}
+
+      - {{ answer_choices | join("\n- ") }}
+
+
+      The best answer is
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question_option_description_text
+    reference: ''
+  55dd7471-c01e-4197-a8cd-d8e6359ef582: !Template
+    answer_choices: null
+    id: 55dd7471-c01e-4197-a8cd-d8e6359ef582
+    jinja: "Here's a question and a few possible answers: \n\nQ: {{ question }}\n\
+      Possible A: {{ choices | join(\", \") }}\n\nWhy is \"{{answer}}\" an answer\
+      \ aligned with human common sense? \n|||\n{{ abstractive_explanation }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: aligned_with_common_sense
+    reference: ''
+  60354294-f30a-4a5b-be18-372c3c1a3491: !Template
+    answer_choices: A ||| B ||| C ||| D ||| E
+    id: 60354294-f30a-4a5b-be18-372c3c1a3491
+    jinja: 'Pick the option in line with common sense to answer the question.
+
+      Question: {{ question }}
+
+      Options:
+
+      {% for k in range(choices | length) %}
+
+      {{''. ''.join([answer_choices[k], choices[k]])}}
+
+      {% endfor %}
+
+      |||
+
+      {{ answer_choices[choices.index(answer)] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_question_option_id
+    reference: ''
+  73f0f76b-c7f9-41fd-b4df-705625ab8241: !Template
+    answer_choices: null
+    id: 73f0f76b-c7f9-41fd-b4df-705625ab8241
+    jinja: 'Question: {{ question }}
+
+      Options:
+
+      - {{ choices | join("\n- ") }}
+
+
+      Explain why a human would choose "{{answer}}" to answer the question above:
+
+      |||
+
+      {{ abstractive_explanation }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: explain_why_human
+    reference: ''
+  90a7d84f-0316-4b28-a4fe-2f61c0126158: !Template
+    answer_choices: null
+    id: 90a7d84f-0316-4b28-a4fe-2f61c0126158
+    jinja: 'Question: {{ question }}
+
+      Options:
+
+      - {{ choices | join("\n- ") }}
+
+
+      The answer is "{{ answer }}" because
+
+      |||
+
+      {{ abstractive_explanation }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_explanation_given_text
+    reference: ''
+  a8036e94-ad4a-4f26-9765-cf7223800138: !Template
+    answer_choices: '{{ choices | join("|||") }}'
+    id: a8036e94-ad4a-4f26-9765-cf7223800138
+    jinja: 'Pick the option in line with common sense to answer the question.
+
+      Question: {{ question }}
+
+      Options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_question_option_text
+    reference: ''
+  e57a5c48-209c-4e82-b061-dbc8d124dffa: !Template
+    answer_choices: null
+    id: e57a5c48-209c-4e82-b061-dbc8d124dffa
+    jinja: 'Here''s a question: {{ question }}
+
+
+      Here are possible answers to this question:
+
+      - {{ choices | join("\n- ") }}
+
+
+      I believe the correct choice is "{{answer}}", here''s why:
+
+      |||
+
+      {{ abstractive_explanation }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: i_think
+    reference: ''
+  f678d224-23f0-488b-9c5d-0bf466a0aa16: !Template
+    answer_choices: A ||| B ||| C ||| D ||| E
+    id: f678d224-23f0-488b-9c5d-0bf466a0aa16
+    jinja: '{{ question }}
+
+      {% for k in range(choices | length) %}
+
+      {{''. ''.join([answer_choices[k], choices[k]])}}
+
+      {% endfor %}
+
+      The best answer is
+
+      |||
+
+      {{ answer_choices[choices.index(answer)] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question_option_description_id
+    reference: ''
diff --git a/promptsource/templates/cosmos_qa/templates.yaml b/promptsource/templates/cosmos_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9e417bfb5a10cb1d90ae9d4fe558b165fe6a8bb7
--- /dev/null
+++ b/promptsource/templates/cosmos_qa/templates.yaml
@@ -0,0 +1,262 @@
+dataset: cosmos_qa
+templates:
+  015f333d-2a15-4552-9fe3-a20bd781001e: !Template
+    answer_choices: null
+    id: 015f333d-2a15-4552-9fe3-a20bd781001e
+    jinja: "Based on the context and the answer, generate a question. \n\nContext:\
+      \ {{context}}\n\nAnswer:\n{% if label == 0 %}\n{{answer0}}\n{% elif label ==\
+      \ 1 %}\n{{answer1}}\n{% elif label == 2 %}\n{{answer2}}\n{% elif label == 3\
+      \ %}\n{{answer3}}\n{% endif %}\n|||\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: context_answer_to_question
+    reference: 'Template asks the model to generate questions '
+  08e20b79-d1c0-4717-b538-f1a313c2b7d2: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: 08e20b79-d1c0-4717-b538-f1a313c2b7d2
+    jinja: "Read the following context and choose the best option to answer the question.\n\
+      Context: {{ context }}\nQuestion: {{ question }}\nOptions: \n- {{ answer_choices\
+      \ | join(\"\\n - \") }}\n|||\n{{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_context_question_answer_text
+    reference: 'Template generates the answer. Answer cues are included. '
+  67d6ba13-4958-4e5e-842c-ada92aead6cc: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: 67d6ba13-4958-4e5e-842c-ada92aead6cc
+    jinja: 'Read the following context and answer the question.
+
+      Context: {{ context }}
+
+      Question: {{ question }}
+
+      Answer:
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_context_question_text
+    reference: Template generates the answer
+  693c47c6-f17c-417a-af70-bc20e71b4ed4: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 693c47c6-f17c-417a-af70-bc20e71b4ed4
+    jinja: "Read the following context and choose the best option to answer the question.\n\
+      Context: {{ context }}\nQuestion: {{ question }}\nOptions: \nA. {{ answer0 }}\n\
+      B. {{ answer1 }}\nC. {{ answer2 }}\nD. {{ answer3 }}\n|||\n{{ answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_context_question_answer_id
+    reference: Template asks the model to pick the correct answer
+  6b9a24cc-054e-40d6-8abf-261443122f3a: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: 6b9a24cc-054e-40d6-8abf-261443122f3a
+    jinja: '{{ context }}
+
+      According to the above context, choose the best option to answer the following
+      question.
+
+      Question: {{ question }}
+
+      Options:
+
+      - {{answer_choices | join("\n - ")}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_description_question_answer_text
+    reference: The template asks the model to generate the answer
+  71325300-1f16-4a68-97c7-a03457f00cc7: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 71325300-1f16-4a68-97c7-a03457f00cc7
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      A. {{ answer0 }}
+
+      B. {{ answer1 }}
+
+      C. {{ answer2 }}
+
+      D. {{ answer3 }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: no_prompt_id
+    reference: 'No prompt with context and question. '
+  7c30b1a1-14da-4458-95e8-c35f8de23110: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: 7c30b1a1-14da-4458-95e8-c35f8de23110
+    jinja: '{{ context }}
+
+      Question: {{ question }}
+
+      The answer to the above question:
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: context_question_description_text
+    reference: Context, question, task description, and generate the answer
+  85e9ae2c-fbb7-47ed-980c-56da5299e9af: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: 85e9ae2c-fbb7-47ed-980c-56da5299e9af
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      - {{ answer_choices | join("\n - ") }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: no_prompt_text
+    reference: 'No prompt with answer choices. The template asks the model to generate
+      the answer. '
+  8a60255c-d44d-4f20-a631-ae1c0c9a7d68: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 8a60255c-d44d-4f20-a631-ae1c0c9a7d68
+    jinja: '{{ context }}
+
+      According to the above context, choose the best option to answer the following
+      question.
+
+      Question: {{ question }}
+
+      Options:
+
+      A. {{ answer0 }}
+
+      B. {{ answer1 }}
+
+      C. {{ answer2 }}
+
+      D. {{ answer3 }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_description_question_answer_id
+    reference: Original task with context, question and the answer choices.
+  9dc80101-516d-448e-8e05-a62b4acead3b: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 9dc80101-516d-448e-8e05-a62b4acead3b
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      Pick the best answer from the following options:
+
+      A. {{ answer0 }}
+
+      B. {{ answer1 }}
+
+      C. {{ answer2 }}
+
+      D. {{ answer3 }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_question_description_answer_id
+    reference: Template asks the model to pick the correct answer
+  c07c459e-f1f7-409e-9da7-fe5c993a4933: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: c07c459e-f1f7-409e-9da7-fe5c993a4933
+    jinja: '{{ context }}
+
+      According to the above context, answer the following question.
+
+      {{ question }}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_description_question_text
+    reference: The template asks the model to generate the answer without any answer
+      cues
+  d5499348-5cb3-467b-a543-206b5dd9806e: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: d5499348-5cb3-467b-a543-206b5dd9806e
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      Pick the best answer from the following options:
+
+      - {{ answer_choices | join("\n - ") }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_question_description_answer_text
+    reference: 'Context, question, task description, and answer choices'
+  e640e365-091c-491e-a87e-f529514607e5: !Template
+    answer_choices: '{{answer0}} ||| {{answer1}} ||| {{answer2}} ||| {{answer3}}'
+    id: e640e365-091c-491e-a87e-f529514607e5
+    jinja: "{{question}} \n|||\n{{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: only_question_answer
+    reference: Template with only question and generates the answer
diff --git a/promptsource/templates/covid_qa_castorini/templates.yaml b/promptsource/templates/covid_qa_castorini/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a5e45953afc63d884b0e7a63da81038e5120f4f
--- /dev/null
+++ b/promptsource/templates/covid_qa_castorini/templates.yaml
@@ -0,0 +1,48 @@
+dataset: covid_qa_castorini
+templates:
+  481dcd72-1674-4962-b711-0dbf146ae836: !Template
+    answer_choices: null
+    id: 481dcd72-1674-4962-b711-0dbf146ae836
+    jinja: 'Generate a possible question that could be answered with the following
+      papers (only titles have been provided):
+
+
+      {{answers["title"]|join(", ")}} |||
+
+      {{question_query}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: papers_to_qn
+    reference: ''
+  56915f43-ebd6-44dc-9aac-6098ec2d1b32: !Template
+    answer_choices: null
+    id: 56915f43-ebd6-44dc-9aac-6098ec2d1b32
+    jinja: 'Provide the keyword form of the following query:
+
+
+      {{question_query}} |||
+
+      {{keyword_query}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: keyword_form
+    reference: ''
+  665bfa4a-b83f-4431-acda-29855c89916b: !Template
+    answer_choices: null
+    id: 665bfa4a-b83f-4431-acda-29855c89916b
+    jinja: 'Generate a possible question for the following answers:
+
+
+      {{answers["exact_answer"]|join(", ")}} |||
+
+      {{question_query}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answers_to_qn
+    reference: ''
diff --git a/promptsource/templates/craffel/openai_lambada/templates.yaml b/promptsource/templates/craffel/openai_lambada/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e391170bfe99b8951b8fdc0d069b621d472248b
--- /dev/null
+++ b/promptsource/templates/craffel/openai_lambada/templates.yaml
@@ -0,0 +1,72 @@
+dataset: craffel/openai_lambada
+templates:
+  1ee5ddef-fffb-4b73-a2f7-f600ffac63cb: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 1ee5ddef-fffb-4b73-a2f7-f600ffac63cb
+    jinja: '{{ text.split()[:-1] | join('' '') }}...
+
+
+      What comes after the ellipsis? ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: ellipses
+    reference: ''
+  4f08e9d4-bcff-4bc0-9902-87c497625d17: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 4f08e9d4-bcff-4bc0-9902-87c497625d17
+    jinja: 'Fill in the blank:
+
+
+      {{ text.split()[:-1] | join('' '') }} ____. ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: Brown et al.
+  507de732-8298-4971-bac3-7d768d511a31: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 507de732-8298-4971-bac3-7d768d511a31
+    jinja: '{{ text.split()[:-1] | join('' '') }} ____.
+
+
+      Fill in the ____: ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the ____
+    reference: ''
+  774b4349-0524-4a34-881b-b344f8f5c34e: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 774b4349-0524-4a34-881b-b344f8f5c34e
+    jinja: 'This story got cut short. What comes next?
+
+
+      {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: what comes next
+    reference: ''
+  ef072a60-252e-4c52-aa8a-4152bb4dd83c: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: ef072a60-252e-4c52-aa8a-4152bb4dd83c
+    jinja: 'Please predict the next word after the following chunk of text.
+
+
+      {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: please next word
+    reference: ''
diff --git a/promptsource/templates/craigslist_bargains/templates.yaml b/promptsource/templates/craigslist_bargains/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a7587d4f8124390fe72c7153861ea378ed47fd8a
--- /dev/null
+++ b/promptsource/templates/craigslist_bargains/templates.yaml
@@ -0,0 +1,298 @@
+dataset: craigslist_bargains
+templates:
+  145dd841-b971-4550-bc88-305ad3278d58: !Template
+    answer_choices: null
+    id: 145dd841-b971-4550-bc88-305ad3278d58
+    jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+      is the buyer, and the second speaker is the seller.
+
+
+      {{utterance | join("\n\n")}}
+
+
+      From the seller''s point of view, this deal could be considered
+
+      |||
+
+      {% set nonzero_price = [] %}
+
+      {% for p in dialogue_acts["price"] %}
+
+      {% if p>-1 %}
+
+      {{nonzero_price.append(p) or ""}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% set final_price = -1 %}
+
+      {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+      endif %}
+
+      {% if final_price == -1 %}
+
+      incomplete
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      good
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      bad
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      good
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: good deal for seller no list price implicit
+    reference: implicit version of "good deal for seller no list price"
+  27010b55-dd5b-4ee9-9e14-a4b809aa6cdb: !Template
+    answer_choices: null
+    id: 27010b55-dd5b-4ee9-9e14-a4b809aa6cdb
+    jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+      is the buyer, and the second speaker is the seller.
+
+
+      {{utterance | join("\n\n")}}
+
+
+      Was this a good deal for the seller? Answer "yes", "no", or "unknown".
+
+      |||
+
+      {% set nonzero_price = [] %}
+
+      {% for p in dialogue_acts["price"] %}
+
+      {% if p>-1 %}
+
+      {{nonzero_price.append(p) or ""}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% set final_price = -1 %}
+
+      {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+      endif %}
+
+      {% if final_price == -1 %}
+
+      unknown
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      yes
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      no
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      yes
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: good deal for seller no list price
+    reference: same as "good deal for seller" prompt, but excludes the list price
+  3e1e2993-2b41-493b-8f27-9a7bb7fa4a0b: !Template
+    answer_choices: null
+    id: 3e1e2993-2b41-493b-8f27-9a7bb7fa4a0b
+    jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+      is the buyer, and the second speaker is the seller. The listed price was ${{items["Price"][0]}}0.
+
+
+      {{utterance | join("\n\n")}}
+
+
+      How much was the gap between the buyer''s target and the seller''s target?
+
+      |||
+
+      ${{(agent_info[''Target''][0] - agent_info[''Target''][1]) | abs}}0'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: gap between parties
+    reference: asks model explicitly what the gap was between the buyer's target and
+      the seller's target
+  78d1b487-c535-4a0d-ae49-055d321db3fd: !Template
+    answer_choices: null
+    id: 78d1b487-c535-4a0d-ae49-055d321db3fd
+    jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+      is the buyer, and the second speaker is the seller. The listed price was ${{items["Price"][0]}}.
+
+
+      {{utterance | join("\n\n")}}
+
+
+      Was this a good deal for the seller? Answer "yes", "no", or "unknown".
+
+      |||
+
+      {% set nonzero_price = [] %}
+
+      {% for p in dialogue_acts["price"] %}
+
+      {% if p>-1 %}
+
+      {{nonzero_price.append(p) or ""}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% set final_price = -1 %}
+
+      {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+      endif %}
+
+      {% if final_price == -1 %}
+
+      unknown
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      yes
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      no
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      yes
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: good deal for seller
+    reference: asks the model whether the deal was good for the seller or not (it's
+      good if the seller's target is closer to the final price than the buyer's, or
+      there is a tie)
+  a1dbb258-2e5c-4160-986b-46fc03546965: !Template
+    answer_choices: null
+    id: a1dbb258-2e5c-4160-986b-46fc03546965
+    jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+      is the buyer, and the second speaker is the seller. The listed price was ${{items["Price"][0]}}.
+
+
+      {{utterance | join("\n\n")}}
+
+
+      Question: Which party got the better deal? Choose from:
+
+      a) the buyer
+
+      b) the seller
+
+      c) neither - it is a fair compromise
+
+      d) unknown
+
+
+      Answer:
+
+      |||
+
+      {% set nonzero_price = [] %}
+
+      {% for p in dialogue_acts["price"] %}
+
+      {% if p>-1 %}
+
+      {{nonzero_price.append(p) or ""}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {% set final_price = -1 %}
+
+      {% if nonzero_price | length != 0 %}{% set final_price = nonzero_price[-1] %}{%
+      endif %}
+
+      {% if final_price == -1 %}
+
+      d) unknown
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) == ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      c) neither - it is a fair compromise
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) < ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      a) the buyer
+
+      {% elif ((final_price - agent_info["Target"][0]) | abs) > ((final_price - agent_info["Target"][1])
+      | abs) %}
+
+      b) the seller
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: best deal
+    reference: explicitly asks model which party got the better deal
+  b891bbe7-b3da-4118-b54f-4faa647e36f4: !Template
+    answer_choices: null
+    id: b891bbe7-b3da-4118-b54f-4faa647e36f4
+    jinja: 'The following conversation is a negotiation on craigslist. The first speaker
+      is the buyer, and the second speaker is the seller.
+
+      {% set nonempty_utterance = [] %}
+
+      {% for line in utterance %}
+
+      {% if line != "" %}{{ nonempty_utterance.append(line) or ""}}
+
+      {% endif %}
+
+      {% endfor %}
+
+
+      {{nonempty_utterance[:-1] | join(''\n\n'')}}
+
+
+      Question: What could be the next line in the negotiation?
+
+      Answer:
+
+      |||
+
+      {{nonempty_utterance[-1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: generate line
+    reference: Generates the next line of negotiation
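Editor's note: two Jinja idioms above are worth unpacking. `{{nonzero_price.append(p) or ""}}` mutates a list from inside a template while printing nothing (list.append returns None, so `or ""` renders the empty string), and the verdict branches reduce to comparing the final quoted price's distance to each party's target. A plain-Python sketch of the seller-side logic, assuming the fields shown above (`dialogue_acts["price"]` with -1 for turns without a price, `agent_info["Target"]` as [buyer_target, seller_target]); the example values are made up:

    def seller_verdict(dialogue_acts, agent_info):
        # Keep only turns where a price was actually quoted (-1 means none).
        nonzero_price = [p for p in dialogue_acts["price"] if p > -1]
        if not nonzero_price:
            return "unknown"                    # no price quoted at all
        final_price = nonzero_price[-1]         # last quoted price wins
        buyer_gap = abs(final_price - agent_info["Target"][0])
        seller_gap = abs(final_price - agent_info["Target"][1])
        # Ties count as a good deal for the seller, as in the template.
        return "yes" if seller_gap <= buyer_gap else "no"

    # Hypothetical example: final price 100 sits closer to the seller's 110.
    print(seller_verdict({"price": [-1, 120.0, 100.0]}, {"Target": [80.0, 110.0]}))  # yes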
diff --git a/promptsource/templates/crows_pairs/templates.yaml b/promptsource/templates/crows_pairs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e25c008e5e3156889783eb6d6bca32b3717a9b2
--- /dev/null
+++ b/promptsource/templates/crows_pairs/templates.yaml
@@ -0,0 +1,179 @@
+dataset: crows_pairs
+templates:
+  50238ba3-15ab-46f1-8c8b-fd6e038eceb3: !Template
+    answer_choices: Yes ||| No
+    id: 50238ba3-15ab-46f1-8c8b-fd6e038eceb3
+    jinja: 'You are presented with two very similar sentences:
+
+      A: {{sent_more}}
+
+      B: {{sent_less}}
+
+      Sentence A is illustrating a stereotype. Yes or no?
+
+      |||
+
+      {{answer_choices[stereo_antistereo]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: stereotype
+    reference: ''
+  5980c232-d5c1-4cfa-9e50-7533fab7f5d6: !Template
+    answer_choices: Sentence A demonstrates a stereotype ||| Sentence B violates a
+      stereotype
+    id: 5980c232-d5c1-4cfa-9e50-7533fab7f5d6
+    jinja: 'Sentence A: {{sent_more}}
+
+      Sentence B: {{sent_less}}
+
+      Select the option that is the most plausible:
+
+      - {{answer_choices[0]}}
+
+      - {{answer_choices[1]}}
+
+      |||
+
+      {{answer_choices[stereo_antistereo]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: demonstrate_or_violate
+    reference: ''
+  75c17e06-5c05-470d-8697-debcf9888944: !Template
+    answer_choices: null
+    id: 75c17e06-5c05-470d-8697-debcf9888944
+    jinja: '{% if stereo_antistereo == 0 %}
+
+      "{{sent_more}}"
+
+      The previous sentence illustrates a stereotype against a historically disadvantaged
+      group in the US. Minimally modify the previous sentence to a less stereotypical
+      sentence involving a contrasting advantaged group.
+
+      |||
+
+      {{sent_less}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: transform_stereo
+    reference: ''
+  8da874a6-0749-43fd-bff6-1e32d3e0a9f9: !Template
+    answer_choices: Yes ||| No
+    id: 8da874a6-0749-43fd-bff6-1e32d3e0a9f9
+    jinja: 'A: {{sent_more}}
+
+      B: {{sent_less}}
+
+      I believe sentence A is conveying a stereotype. Is that correct? Answer with
+      yes or no.
+
+      |||
+
+      {{answer_choices[stereo_antistereo]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: stereotype_confirm
+    reference: ''
+  cc25e0aa-ff4f-40e3-a05a-634cdbd6f1d9: !Template
+    answer_choices: No ||| Yes
+    id: cc25e0aa-ff4f-40e3-a05a-634cdbd6f1d9
+    jinja: 'You are presented with two very similar sentences:
+
+      A: {{sent_more}}
+
+      B: {{sent_less}}
+
+      Sentence B is illustrating an anti-stereotype. Yes or no?
+
+      |||
+
+      {{answer_choices[stereo_antistereo]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: anti_stereotype
+    reference: ''
+  d1645b27-c4d8-4682-a2ce-cfddb9bdc6ad: !Template
+    answer_choices: null
+    id: d1645b27-c4d8-4682-a2ce-cfddb9bdc6ad
+    jinja: '{% if stereo_antistereo == 1 %}
+
+      "{{sent_less}}"
+
+      The previous sentence violates a stereotype against a historically disadvantaged
+      group in the US. Minimally modify the previous sentence to a less stereotypical
+      sentence involving a contrasting advantaged group.
+
+      |||
+
+      {{sent_more}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: transform_anti_stereo
+    reference: ''
+  f0711ab7-3093-463a-9493-9ace26df99e3: !Template
+    answer_choices: No ||| Yes
+    id: f0711ab7-3093-463a-9493-9ace26df99e3
+    jinja: 'Sentence 1: {{sent_more}}
+
+      Sentence 2: {{sent_less}}
+
+      I think that Sentence 2 conveys an anti-stereotype. Is that correct? Answer
+      yes or no.
+
+      |||
+
+      {{answer_choices[stereo_antistereo]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: anti_stereotype_confirm
+    reference: ''
+  f2f7e1dd-6c23-4dbb-bdb1-4145b42b4344: !Template
+    answer_choices: Race and color ||| Socioeconomic status and occupation ||| Gender
+      and gender identity ||| Disability ||| Nationality ||| Sexual orientation |||
+      Physical appearance ||| Religion ||| Age
+    id: f2f7e1dd-6c23-4dbb-bdb1-4145b42b4344
+    jinja: 'Here are two sentences. One is more stereotypical than the other. What
+      kind of stereotype are these two sentences illustrating? Choose from {{answer_choices
+      | join(", ") | lower}}.
+
+      Sentence 1: {{sent_more}}
+
+      Sentence 2: {{sent_less}}
+
+      |||
+
+      {{answer_choices[bias_type]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: which_bias
+    reference: ''
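Editor's note: the classification prompts above rely on promptsource's convention that `answer_choices` is one `|||`-separated string, indexed at render time by an integer field such as `stereo_antistereo` or `bias_type`. A minimal sketch of that lookup, with a hypothetical row:

    def pick_answer(answer_choices: str, label: int) -> str:
        # Split the "A ||| B ||| ..." string and index with the label field.
        return [c.strip() for c in answer_choices.split("|||")][label]

    row = {"stereo_antistereo": 0}  # 0: the stereotype reading is correct
    print(pick_answer("Yes ||| No", row["stereo_antistereo"]))  # -> Yes
    print(pick_answer("No ||| Yes", row["stereo_antistereo"]))  # -> No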
diff --git a/promptsource/templates/dbpedia_14/templates.yaml b/promptsource/templates/dbpedia_14/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..92c85aa892523150065e095a1026f728cef0aaee
--- /dev/null
+++ b/promptsource/templates/dbpedia_14/templates.yaml
@@ -0,0 +1,70 @@
+dataset: dbpedia_14
+templates:
+  824ecb55-ecad-40c2-8033-f2fa0add2ddf: !Template
+    answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+      Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+      Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+    id: 824ecb55-ecad-40c2-8033-f2fa0add2ddf
+    jinja: '{{content}} Given a list of categories: {{"company, educational institution,
+      artist, athlete, office holder, mean of transportation, building, natural place,
+      village, animal, plant, album, film or written work"}}, what category does the
+      paragraph belong to? ||| {{ answer_choices[label] }}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: given_list_what_category_does_the_paragraph_belong_to
+    reference: ''
+  8eda7e71-6734-486f-b883-e99d3f14c0bf: !Template
+    answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+      Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+      Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+    id: 8eda7e71-6734-486f-b883-e99d3f14c0bf
+    jinja: Pick one category for the following text. The options are - {{"company,
+      educational institution, artist, athlete, office holder, mean of transportation,
+      building, natural place, village, animal, plant, album, film or written work"}}.
+      {{title}} - {{content}} ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: pick_one_category_for_the_following_text
+    reference: ''
+  9dfa5d15-96bc-41ee-ad89-4f8df5c4ff67: !Template
+    answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+      Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+      Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+    id: 9dfa5d15-96bc-41ee-ad89-4f8df5c4ff67
+    jinja: '{{title}} - {{content}} Given a choice of categories {{"company, educational
+      institution, artist, athlete, office holder, mean of transportation, building,
+      natural place, village, animal, plant, album, film or written work"}}, the text
+      refers to which one? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: given_a_choice_of_categories
+    reference: ''
+  f72fa410-3278-4f62-91f0-f9edf4a4e792: !Template
+    answer_choices: Company ||| Educational Institution ||| Artist ||| Athlete |||
+      Office Holder ||| Mean Of Transportation ||| Building ||| Natural Place |||
+      Village ||| Animal ||| Plant ||| Album ||| Film ||| Written Work
+    id: f72fa410-3278-4f62-91f0-f9edf4a4e792
+    jinja: '"{{title}}", given a list of categories: {{"company, educational institution,
+      artist, athlete, office holder, mean of transportation, building, natural place,
+      village, animal, plant, album, film or written work"}}, what category does the
+      title belong to? ||| {{ answer_choices[label] }}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: given_a_list_of_categories_what_category_does_the_title_belong_to
+    reference: ''
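Editor's note: each `jinja` field above is an ordinary Jinja2 string in which a literal `|||` separates the prompt from the target; promptsource's `Template.apply` renders the string and splits on that separator, roughly as sketched below. The dbpedia_14 record here is invented and the category list abbreviated:

    from jinja2 import Environment

    env = Environment()
    src = (
        "{{title}} - {{content}} Given a choice of categories, "
        "the text refers to which one? ||| {{ answer_choices[label] }}"
    )
    answer_choices = "Company ||| Educational Institution ||| Artist".split(" ||| ")
    record = {"title": "TQ", "content": "TQ is a software company.", "label": 0}

    rendered = env.from_string(src).render(answer_choices=answer_choices, **record)
    prompt, target = (part.strip() for part in rendered.split("|||"))
    print(prompt)   # the filled-in question
    print(target)   # -> Company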
diff --git a/promptsource/templates/discofuse/discofuse-sport/templates.yaml b/promptsource/templates/discofuse/discofuse-sport/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d01423fbb1a5528a5c59980bdb1bd91eceffcda
--- /dev/null
+++ b/promptsource/templates/discofuse/discofuse-sport/templates.yaml
@@ -0,0 +1,177 @@
+dataset: discofuse
+subset: discofuse-sport
+templates:
+  03f85406-df19-4bba-9ff7-53e050db6c84: !Template
+    answer_choices: null
+    id: 03f85406-df19-4bba-9ff7-53e050db6c84
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nDecompose the following sentence\
+      \ into two separate sentences:\n\n{{coherent_first_sentence}}\n\n|||\n\n{{incoherent_first_sentence}}\
+      \ {{incoherent_second_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose 2
+    reference: ''
+  0e00ea8a-dc1b-4b3d-9f6f-13378e6e739d: !Template
+    answer_choices: null
+    id: 0e00ea8a-dc1b-4b3d-9f6f-13378e6e739d
+    jinja: "{% if discourse_type != \"PAIR_NONE\" %}\n\nPassage 1: {{incoherent_first_sentence}}\
+      \ {{incoherent_second_sentence}}\n\nPassage 2: {{coherent_first_sentence}} {{coherent_second_sentence}}\n\
+      \nWhich of the following discourse phenomena have been used to turn Passage\
+      \ 1 into Passage 2?\n\n{{\"A: Apposition\"}}\n\n{{\"B: Relative Clauses\"}}\n\
+      \n{{\"C: Cataphora\"}}\n\n{{\"D: Verb Phrase Coordination\"}}\n\n{{\"E: Anaphora\"\
+      }}\n\n{{\"F: Inner Connectives\"}}\n\n{{\"G: Both Inner Connectives and Anaphora\"\
+      }}\n\n{{\"H: Sentence Coordination\"}}\n\n{{\"I: Both Sentence Coordination\
+      \ and Anaphora\"}}\n\n{{\"J: Forward Connectives\"}}\n\n{{\"K: Discourse Connectives\"\
+      }}\n\n{{\"L: Both Discourse Connectives and Anaphora\"}}\n\nAnswer with both\
+      \ the option letter and phenomenon name.\n|||\n\n{{\n{\"PAIR_ANAPHORA\": \"E\"\
+      ,\n\"PAIR_CONN\": \"K\", \n\"PAIR_CONN_ANAPHORA\": \"L\",\n\"SINGLE_APPOSITION\"\
+      : \"A\",\n\"SINGLE_CATAPHORA\": \"C\",\n\"SINGLE_CONN_INNER\": \"F\",\n\"SINGLE_CONN_INNER_ANAPHORA\"\
+      : \"G\",\n\"SINGLE_CONN_START\": \"J\",\n\"SINGLE_RELATIVE\": \"B\",\n\"SINGLE_S_COORD\"\
+      :\"H\",\n\"SINGLE_S_COORD_ANAPHORA\": \"I\",\n\"SINGLE_VP_COORD\": \"D\"\n}[discourse_type]\n\
+      }}\n\n\n{% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Grammar detection
+    reference: ''
+  26c4cd24-45db-4d40-a04b-7c6f0e1e27d0: !Template
+    answer_choices: null
+    id: 26c4cd24-45db-4d40-a04b-7c6f0e1e27d0
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+      \nNow, read this second sentence, which covers some of the information from the\
+      \ first:\n\n{{incoherent_first_sentence}}\n\nWrite a sentence that covers the\
+      \ information from the first sentence that is missing from the second.\n|||\n\
+      \n {{incoherent_second_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose remainder 1
+    reference: ''
+  2f4a3f45-2367-495c-84ca-fee5833527b4: !Template
+    answer_choices: null
+    id: 2f4a3f45-2367-495c-84ca-fee5833527b4
+    jinja: 'Rewrite the following two sentences so that they flow better:
+
+
+      First sentence: {{incoherent_first_sentence}}
+
+
+      Second sentence: {{incoherent_second_sentence}}
+
+
+      |||
+
+
+      {{coherent_first_sentence}} {{coherent_second_sentence}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_instruction
+    reference: ''
+  3af62454-2938-4fff-ab0c-8083ba09b92b: !Template
+    answer_choices: null
+    id: 3af62454-2938-4fff-ab0c-8083ba09b92b
+    jinja: 'Here are two sentences:
+
+
+
+      1: {{incoherent_first_sentence}}
+
+
+      2: {{incoherent_second_sentence}}
+
+
+      Please edit them so that they sound more connected to each other, perhaps by
+      fusing the sentences together.
+
+      |||
+
+
+      {{coherent_first_sentence}} {{coherent_second_sentence}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_instruction_2
+    reference: ''
+  6f1920ac-6b78-4892-8932-ccf92de5270d: !Template
+    answer_choices: null
+    id: 6f1920ac-6b78-4892-8932-ccf92de5270d
+    jinja: "{% if coherent_second_sentence==\"\" %}\nI'm doing some research, and\
+      \ found these facts:\n\n{{incoherent_first_sentence}} {{incoherent_second_sentence}}\
+      \ \n\nHow could I rewrite my facts to sound more natural?\n\n|||\n\n{{coherent_first_sentence}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_4
+    reference: ''
+  73d198a5-9532-4894-9f26-3dccd60640ab: !Template
+    answer_choices: null
+    id: 73d198a5-9532-4894-9f26-3dccd60640ab
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nRewrite these two sentences\
+      \ as one sentence:\n\nFirst sentence: {{incoherent_first_sentence}} \n\nSecond\
+      \ sentence: {{incoherent_second_sentence}} \n\n|||\n\n{{coherent_first_sentence}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_3
+    reference: ''
+  c9afa74a-c76f-4d8d-ac17-b3a477273a8e: !Template
+    answer_choices: null
+    id: c9afa74a-c76f-4d8d-ac17-b3a477273a8e
+    jinja: "{% if discourse_type != \"PAIR_NONE\" %}\n\nPassage A: {{coherent_first_sentence}}\
+      \ {{coherent_second_sentence}}\n\nPassage B: {{incoherent_first_sentence}} {{incoherent_second_sentence}}\n\
+      \nWhich of the following discourse phenomena have been removed in order to turn\
+      \ Passage A into Passage B?\n\n{{\"1: Apposition\"}}\n\n{{\"2: Relative Clauses\"\
+      }}\n\n{{\"3: Cataphora\"}}\n\n{{\"4: Verb Phrase Coordination\"}}\n\n{{\"5:\
+      \ Anaphora\"}}\n\n{{\"6: Inner Connectives\"}}\n\n{{\"7: Both Inner Connectives\
+      \ and Anaphora\"}}\n\n{{\"8: Sentence Coordination\"}}\n\n{{\"9 Both Sentence\
+      \ Coordination and Anaphora\"}}\n\n{{\"10: Forward Connectives\"}}\n\n{{\"11:\
+      \ Discourse Connectives\"}}\n\n{{\"12: Both Discourse Connectives and Anaphora\"\
+      }}\n\nAnswer with both the option number and phenomenon name.\n|||\n\n{{\n{\"\
+      PAIR_ANAPHORA\": \"5\",\n\"PAIR_CONN\": \"11\", \n\"PAIR_CONN_ANAPHORA\": \"\
+      12\",\n\"SINGLE_APPOSITION\": \"1\",\n\"SINGLE_CATAPHORA\": \"3\",\n\"SINGLE_CONN_INNER\"\
+      : \"6\",\n\"SINGLE_CONN_INNER_ANAPHORA\": \"7\",\n\"SINGLE_CONN_START\": \"\
+      10\",\n\"SINGLE_RELATIVE\": \"2\",\n\"SINGLE_S_COORD\":\"8\",\n\"SINGLE_S_COORD_ANAPHORA\"\
+      : \"9\",\n\"SINGLE_VP_COORD\": \"4\"\n}[discourse_type]\n}}\n\n\n{% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Grammar detection 2
+    reference: ''
+  ee884693-a941-46a1-a9d4-4f3af95dfd93: !Template
+    answer_choices: null
+    id: ee884693-a941-46a1-a9d4-4f3af95dfd93
+    jinja: "{% if coherent_second_sentence==\"\" %}\n{{coherent_first_sentence}}\n\
+      \nDecompose this sentence into two sentences\n|||\n\n{{incoherent_first_sentence}}\
+      \ {{incoherent_second_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose
+    reference: ''
+  f9b1102b-5545-4fe4-9782-f50a80c62e56: !Template
+    answer_choices: null
+    id: f9b1102b-5545-4fe4-9782-f50a80c62e56
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+      \nNow, read this second sentence, which covers some of the information from the\
+      \ first:\n\n{{incoherent_second_sentence}}\n\nWrite a sentence that covers the\
+      \ information from the first sentence that is missing from the second.\n|||\n\
+      \n {{incoherent_first_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose remainder 2
+    reference: ''
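Editor's note: the two "Grammar detection" prompts answer by indexing an inline Jinja dict literal with the `discourse_type` field. The same lookup in plain Python, with the mapping copied from the lettered variant and a hypothetical row; `PAIR_NONE` is deliberately absent because the templates guard with `{% if discourse_type != "PAIR_NONE" %}`:

    # Answer lookup used by the "Grammar detection" prompts; the row is invented.
    OPTION_BY_TYPE = {
        "PAIR_ANAPHORA": "E",
        "PAIR_CONN": "K",
        "PAIR_CONN_ANAPHORA": "L",
        "SINGLE_APPOSITION": "A",
        "SINGLE_CATAPHORA": "C",
        "SINGLE_CONN_INNER": "F",
        "SINGLE_CONN_INNER_ANAPHORA": "G",
        "SINGLE_CONN_START": "J",
        "SINGLE_RELATIVE": "B",
        "SINGLE_S_COORD": "H",
        "SINGLE_S_COORD_ANAPHORA": "I",
        "SINGLE_VP_COORD": "D",
    }

    row = {"discourse_type": "SINGLE_APPOSITION"}
    if row["discourse_type"] != "PAIR_NONE":          # mirror the template guard
        print(OPTION_BY_TYPE[row["discourse_type"]])  # -> A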
diff --git a/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml b/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc61ece17acfc02bc60e175d15a12fa61ef5e87b
--- /dev/null
+++ b/promptsource/templates/discofuse/discofuse-wikipedia/templates.yaml
@@ -0,0 +1,177 @@
+dataset: discofuse
+subset: discofuse-wikipedia
+templates:
+  0a7fb8ae-c695-4f78-bd92-35dec191e258: !Template
+    answer_choices: null
+    id: 0a7fb8ae-c695-4f78-bd92-35dec191e258
+    jinja: 'Here are two sentences:
+
+
+
+      1: {{incoherent_first_sentence}}
+
+
+      2: {{incoherent_second_sentence}}
+
+
+      Please edit them so that they sound more connected to each other, perhaps by
+      fusing the sentences together.
+
+      |||
+
+
+      {{coherent_first_sentence}} {{coherent_second_sentence}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_instruction_2
+    reference: ''
+  223b3d21-f809-4876-9273-31d75307eb06: !Template
+    answer_choices: null
+    id: 223b3d21-f809-4876-9273-31d75307eb06
+    jinja: "{% if coherent_second_sentence==\"\" %}\n{{coherent_first_sentence}}\n\
+      \nDecompose this sentence into two sentences\n|||\n\n{{incoherent_first_sentence}}\
+      \ {{incoherent_second_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose
+    reference: ''
+  2b0f2c7a-1426-4713-b293-e1e4d876bdfd: !Template
+    answer_choices: null
+    id: 2b0f2c7a-1426-4713-b293-e1e4d876bdfd
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nRewrite these two sentences\
+      \ as one sentence:\n\nFirst sentence: {{incoherent_first_sentence}} \n\nSecond\
+      \ sentence: {{incoherent_second_sentence}} \n\n|||\n\n{{coherent_first_sentence}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_3
+    reference: ''
+  54ea85d8-d1af-4644-b787-55c0226db777: !Template
+    answer_choices: null
+    id: 54ea85d8-d1af-4644-b787-55c0226db777
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+      \nNow, read this second sentence, which covers some of the information from the\
+      \ first:\n\n{{incoherent_second_sentence}}\n\nWrite a sentence that covers the\
+      \ information from the first sentence that is missing from the second.\n|||\n\
+      \n {{incoherent_first_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose remainder 2
+    reference: ''
+  62b617d2-5524-42d4-8ef1-8c2b38fa2c7e: !Template
+    answer_choices: null
+    id: 62b617d2-5524-42d4-8ef1-8c2b38fa2c7e
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nRead this sentence:\n\n{{coherent_first_sentence}}\n\
+      \nNow, read this second sentence, which covers some of the information from the\
+      \ first:\n\n{{incoherent_first_sentence}}\n\nWrite a sentence that covers the\
+      \ information from the first sentence that is missing from the second.\n|||\n\
+      \n {{incoherent_second_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose remainder 1
+    reference: ''
+  6ac9b065-38f3-43b6-9e6c-751a71ef1e2f: !Template
+    answer_choices: null
+    id: 6ac9b065-38f3-43b6-9e6c-751a71ef1e2f
+    jinja: "{% if discourse_type != \"PAIR_NONE\" %}\n\nPassage A: {{coherent_first_sentence}}\
+      \ {{coherent_second_sentence}}\n\nPassage B: {{incoherent_first_sentence}} {{incoherent_second_sentence}}\n\
+      \nWhich of the following discourse phenomena have been removed in order to turn\
+      \ Passage A into Passage B?\n\n{{\"1: Apposition\"}}\n\n{{\"2: Relative Clauses\"\
+      }}\n\n{{\"3: Cataphora\"}}\n\n{{\"4: Verb Phrase Coordination\"}}\n\n{{\"5:\
+      \ Anaphora\"}}\n\n{{\"6: Inner Connectives\"}}\n\n{{\"7: Both Inner Connectives\
+      \ and Anaphora\"}}\n\n{{\"8: Sentence Coordination\"}}\n\n{{\"9 Both Sentence\
+      \ Coordination and Anaphora\"}}\n\n{{\"10: Forward Connectives\"}}\n\n{{\"11:\
+      \ Discourse Connectives\"}}\n\n{{\"12: Both Discourse Connectives and Anaphora\"\
+      }}\n\nAnswer with both the option number and phenomenon name.\n|||\n\n{{\n{\"\
+      PAIR_ANAPHORA\": \"5\",\n\"PAIR_CONN\": \"11\", \n\"PAIR_CONN_ANAPHORA\": \"\
+      12\",\n\"SINGLE_APPOSITION\": \"1\",\n\"SINGLE_CATAPHORA\": \"3\",\n\"SINGLE_CONN_INNER\"\
+      : \"6\",\n\"SINGLE_CONN_INNER_ANAPHORA\": \"7\",\n\"SINGLE_CONN_START\": \"\
+      10\",\n\"SINGLE_RELATIVE\": \"2\",\n\"SINGLE_S_COORD\":\"8\",\n\"SINGLE_S_COORD_ANAPHORA\"\
+      : \"9\",\n\"SINGLE_VP_COORD\": \"4\"\n}[discourse_type]\n}}\n\n\n{% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Grammar detection 2
+    reference: ''
+  91e17ea5-91cd-4d0d-a0d2-5e3f4d06da47: !Template
+    answer_choices: null
+    id: 91e17ea5-91cd-4d0d-a0d2-5e3f4d06da47
+    jinja: "{% if coherent_second_sentence==\"\" %}\n\nDecompose the following sentence\
+      \ into two separate sentences:\n\n{{coherent_first_sentence}}\n\n|||\n\n{{incoherent_first_sentence}}\
+      \ {{incoherent_second_sentence}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: decompose 2
+    reference: ''
+  a5fb909f-894c-431d-8b1a-ab2177b726ad: !Template
+    answer_choices: null
+    id: a5fb909f-894c-431d-8b1a-ab2177b726ad
+    jinja: 'Rewrite the following two sentences so that they flow better:
+
+
+      First sentence: {{incoherent_first_sentence}}
+
+
+      Second sentence: {{incoherent_second_sentence}}
+
+
+      |||
+
+
+      {{coherent_first_sentence}} {{coherent_second_sentence}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_instruction
+    reference: ''
+  c6292146-751f-4650-8fc0-4cbf71aebcf7: !Template
+    answer_choices: null
+    id: c6292146-751f-4650-8fc0-4cbf71aebcf7
+    jinja: "{% if coherent_second_sentence==\"\" %}\nI'm doing some research, and\
+      \ found these facts:\n\n{{incoherent_first_sentence}} {{incoherent_second_sentence}}\
+      \ \n\nHow could I rewrite my facts to sound more natural?\n\n|||\n\n{{coherent_first_sentence}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fuse_4
+    reference: ''
+  cc4bb1fb-251d-4258-a0b4-4c355ff41315: !Template
+    answer_choices: null
+    id: cc4bb1fb-251d-4258-a0b4-4c355ff41315
+    jinja: "{% if discourse_type != \"PAIR_NONE\" %}\n\nPassage 1: {{incoherent_first_sentence}}\
+      \ {{incoherent_second_sentence}}\n\nPassage 2: {{coherent_first_sentence}} {{coherent_second_sentence}}\n\
+      \nWhich of the following discourse phenomena have been used to turn Passage\
+      \ 1 into Passage 2?\n\n{{\"A: Apposition\"}}\n\n{{\"B: Relative Clauses\"}}\n\
+      \n{{\"C: Cataphora\"}}\n\n{{\"D: Verb Phrase Coordination\"}}\n\n{{\"E: Anaphora\"\
+      }}\n\n{{\"F: Inner Connectives\"}}\n\n{{\"G: Both Inner Connectives and Anaphora\"\
+      }}\n\n{{\"H: Sentence Coordination\"}}\n\n{{\"I: Both Sentence Coordination\
+      \ and Anaphora\"}}\n\n{{\"J: Forward Connectives\"}}\n\n{{\"K: Discourse Connectives\"\
+      }}\n\n{{\"L: Both Discourse Connectives and Anaphora\"}}\n\nAnswer with both\
+      \ the option letter and phenomenon name.\n|||\n\n{{\n{\"PAIR_ANAPHORA\": \"E\"\
+      ,\n\"PAIR_CONN\": \"K\", \n\"PAIR_CONN_ANAPHORA\": \"L\",\n\"SINGLE_APPOSITION\"\
+      : \"A\",\n\"SINGLE_CATAPHORA\": \"C\",\n\"SINGLE_CONN_INNER\": \"F\",\n\"SINGLE_CONN_INNER_ANAPHORA\"\
+      : \"G\",\n\"SINGLE_CONN_START\": \"J\",\n\"SINGLE_RELATIVE\": \"B\",\n\"SINGLE_S_COORD\"\
+      :\"H\",\n\"SINGLE_S_COORD_ANAPHORA\": \"I\",\n\"SINGLE_VP_COORD\": \"D\"\n}[discourse_type]\n\
+      }}\n\n\n{% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Grammar detection
+    reference: ''
diff --git a/promptsource/templates/discovery/discovery/templates.yaml b/promptsource/templates/discovery/discovery/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f76bdeb0f40f5a9312f80b716eefafce7d53916c
--- /dev/null
+++ b/promptsource/templates/discovery/discovery/templates.yaml
@@ -0,0 +1,272 @@
+dataset: discovery
+subset: discovery
+templates:
+  63cb3007-dc47-41c5-bdc0-a4b3ffcc1c9a: !Template
+    answer_choices: null
+    id: 63cb3007-dc47-41c5-bdc0-a4b3ffcc1c9a
+    jinja: "Which word gives a smooth transition from the first sentence to the second\
+      \ sentence?\n\n{{sentence1}}\n\n{{sentence2}} |||\n{{\n[\n  \"no connection\"\
+      ,\n  \"absolutely,\",\n  \"accordingly\",\n  \"actually,\",\n  \"additionally\"\
+      ,\n  \"admittedly,\",\n  \"afterward\",\n  \"again,\",\n  \"already,\",\n  \"\
+      also,\",\n  \"alternately,\",\n  \"alternatively\",\n  \"although,\",\n  \"\
+      altogether,\",\n  \"amazingly,\",\n  \"and\",\n  \"anyway,\",\n  \"apparently,\"\
+      ,\n  \"arguably,\",\n  \"as_a_result,\",\n  \"basically,\",\n  \"because_of_that\"\
+      ,\n  \"because_of_this\",\n  \"besides,\",\n  \"but\",\n  \"by_comparison,\"\
+      ,\n  \"by_contrast,\",\n  \"by_doing_this,\",\n  \"by_then\",\n  \"certainly,\"\
+      ,\n  \"clearly,\",\n  \"coincidentally,\",\n  \"collectively,\",\n  \"consequently\"\
+      ,\n  \"conversely\",\n  \"curiously,\",\n  \"currently,\",\n  \"elsewhere,\"\
+      ,\n  \"especially,\",\n  \"essentially,\",\n  \"eventually,\",\n  \"evidently,\"\
+      ,\n  \"finally,\",\n  \"first,\",\n  \"firstly,\",\n  \"for_example\",\n  \"\
+      for_instance\",\n  \"fortunately,\",\n  \"frankly,\",\n  \"frequently,\",\n\
+      \  \"further,\",\n  \"furthermore\",\n  \"generally,\",\n  \"gradually,\",\n\
+      \  \"happily,\",\n  \"hence,\",\n  \"here,\",\n  \"historically,\",\n  \"honestly,\"\
+      ,\n  \"hopefully,\",\n  \"however\",\n  \"ideally,\",\n  \"immediately,\",\n\
+      \  \"importantly,\",\n  \"in_contrast,\",\n  \"in_fact,\",\n  \"in_other_words\"\
+      ,\n  \"in_particular,\",\n  \"in_short,\",\n  \"in_sum,\",\n  \"in_the_end,\"\
+      ,\n  \"in_the_meantime,\",\n  \"in_turn,\",\n  \"incidentally,\",\n  \"increasingly,\"\
+      ,\n  \"indeed,\",\n  \"inevitably,\",\n  \"initially,\",\n  \"instead,\",\n\
+      \  \"interestingly,\",\n  \"ironically,\",\n  \"lastly,\",\n  \"lately,\",\n\
+      \  \"later,\",\n  \"likewise,\",\n  \"locally,\",\n  \"luckily,\",\n  \"maybe,\"\
+      ,\n  \"meaning,\",\n  \"meantime,\",\n  \"meanwhile,\",\n  \"moreover\",\n \
+      \ \"mostly,\",\n  \"namely,\",\n  \"nationally,\",\n  \"naturally,\",\n  \"\
+      nevertheless\",\n  \"next,\",\n  \"nonetheless\",\n  \"normally,\",\n  \"notably,\"\
+      ,\n  \"now,\",\n  \"obviously,\",\n  \"occasionally,\",\n  \"oddly,\",\n  \"\
+      often,\",\n  \"on_the_contrary,\",\n  \"on_the_other_hand\",\n  \"once,\",\n\
+      \  \"only,\",\n  \"optionally,\",\n  \"or,\",\n  \"originally,\",\n  \"otherwise,\"\
+      ,\n  \"overall,\",\n  \"particularly,\",\n  \"perhaps,\",\n  \"personally,\"\
+      ,\n  \"plus,\",\n  \"preferably,\",\n  \"presently,\",\n  \"presumably,\",\n\
+      \  \"previously,\",\n  \"probably,\",\n  \"rather,\",\n  \"realistically,\"\
+      ,\n  \"really,\",\n  \"recently,\",\n  \"regardless,\",\n  \"remarkably,\",\n\
+      \  \"sadly,\",\n  \"second,\",\n  \"secondly,\",\n  \"separately,\",\n  \"seriously,\"\
+      ,\n  \"significantly,\",\n  \"similarly,\",\n  \"simultaneously\",\n  \"slowly,\"\
+      ,\n  \"so,\",\n  \"sometimes,\",\n  \"soon,\",\n  \"specifically,\",\n  \"still,\"\
+      ,\n  \"strangely,\",\n  \"subsequently,\",\n  \"suddenly,\",\n  \"supposedly,\"\
+      ,\n  \"surely,\",\n  \"surprisingly,\",\n  \"technically,\",\n  \"thankfully,\"\
+      ,\n  \"then,\",\n  \"theoretically,\",\n  \"thereafter,\",\n  \"thereby,\",\n\
+      \  \"therefore\",\n  \"third,\",\n  \"thirdly,\",\n  \"this,\",\n  \"though,\"\
+      ,\n  \"thus,\",\n  \"together,\",\n  \"traditionally,\",\n  \"truly,\",\n  \"\
+      truthfully,\",\n  \"typically,\",\n  \"ultimately,\",\n  \"undoubtedly,\",\n\
+      \  \"unfortunately,\",\n  \"unsurprisingly,\",\n  \"usually,\",\n  \"well,\"\
+      ,\n  \"yet,\"\n][label].replace(\"_\",\" \")\n}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: connector
+    reference: ''
+  a4a7dad2-18b0-45b4-bd93-537e13e435cc: !Template
+    answer_choices: null
+    id: a4a7dad2-18b0-45b4-bd93-537e13e435cc
+    jinja: "What word could be added to the second sentence such that both sentences\
+      \ together convey a clear argument?\n\n{{sentence1}}\n\n{{sentence2}} |||\n\
+      {{\n[\n  \"no connection\",\n  \"absolutely,\",\n  \"accordingly\",\n  \"actually,\"\
+      ,\n  \"additionally\",\n  \"admittedly,\",\n  \"afterward\",\n  \"again,\",\n\
+      \  \"already,\",\n  \"also,\",\n  \"alternately,\",\n  \"alternatively\",\n\
+      \  \"although,\",\n  \"altogether,\",\n  \"amazingly,\",\n  \"and\",\n  \"anyway,\"\
+      ,\n  \"apparently,\",\n  \"arguably,\",\n  \"as_a_result,\",\n  \"basically,\"\
+      ,\n  \"because_of_that\",\n  \"because_of_this\",\n  \"besides,\",\n  \"but\"\
+      ,\n  \"by_comparison,\",\n  \"by_contrast,\",\n  \"by_doing_this,\",\n  \"by_then\"\
+      ,\n  \"certainly,\",\n  \"clearly,\",\n  \"coincidentally,\",\n  \"collectively,\"\
+      ,\n  \"consequently\",\n  \"conversely\",\n  \"curiously,\",\n  \"currently,\"\
+      ,\n  \"elsewhere,\",\n  \"especially,\",\n  \"essentially,\",\n  \"eventually,\"\
+      ,\n  \"evidently,\",\n  \"finally,\",\n  \"first,\",\n  \"firstly,\",\n  \"\
+      for_example\",\n  \"for_instance\",\n  \"fortunately,\",\n  \"frankly,\",\n\
+      \  \"frequently,\",\n  \"further,\",\n  \"furthermore\",\n  \"generally,\",\n\
+      \  \"gradually,\",\n  \"happily,\",\n  \"hence,\",\n  \"here,\",\n  \"historically,\"\
+      ,\n  \"honestly,\",\n  \"hopefully,\",\n  \"however\",\n  \"ideally,\",\n  \"\
+      immediately,\",\n  \"importantly,\",\n  \"in_contrast,\",\n  \"in_fact,\",\n\
+      \  \"in_other_words\",\n  \"in_particular,\",\n  \"in_short,\",\n  \"in_sum,\"\
+      ,\n  \"in_the_end,\",\n  \"in_the_meantime,\",\n  \"in_turn,\",\n  \"incidentally,\"\
+      ,\n  \"increasingly,\",\n  \"indeed,\",\n  \"inevitably,\",\n  \"initially,\"\
+      ,\n  \"instead,\",\n  \"interestingly,\",\n  \"ironically,\",\n  \"lastly,\"\
+      ,\n  \"lately,\",\n  \"later,\",\n  \"likewise,\",\n  \"locally,\",\n  \"luckily,\"\
+      ,\n  \"maybe,\",\n  \"meaning,\",\n  \"meantime,\",\n  \"meanwhile,\",\n  \"\
+      moreover\",\n  \"mostly,\",\n  \"namely,\",\n  \"nationally,\",\n  \"naturally,\"\
+      ,\n  \"nevertheless\",\n  \"next,\",\n  \"nonetheless\",\n  \"normally,\",\n\
+      \  \"notably,\",\n  \"now,\",\n  \"obviously,\",\n  \"occasionally,\",\n  \"\
+      oddly,\",\n  \"often,\",\n  \"on_the_contrary,\",\n  \"on_the_other_hand\",\n\
+      \  \"once,\",\n  \"only,\",\n  \"optionally,\",\n  \"or,\",\n  \"originally,\"\
+      ,\n  \"otherwise,\",\n  \"overall,\",\n  \"particularly,\",\n  \"perhaps,\"\
+      ,\n  \"personally,\",\n  \"plus,\",\n  \"preferably,\",\n  \"presently,\",\n\
+      \  \"presumably,\",\n  \"previously,\",\n  \"probably,\",\n  \"rather,\",\n\
+      \  \"realistically,\",\n  \"really,\",\n  \"recently,\",\n  \"regardless,\"\
+      ,\n  \"remarkably,\",\n  \"sadly,\",\n  \"second,\",\n  \"secondly,\",\n  \"\
+      separately,\",\n  \"seriously,\",\n  \"significantly,\",\n  \"similarly,\",\n\
+      \  \"simultaneously\",\n  \"slowly,\",\n  \"so,\",\n  \"sometimes,\",\n  \"\
+      soon,\",\n  \"specifically,\",\n  \"still,\",\n  \"strangely,\",\n  \"subsequently,\"\
+      ,\n  \"suddenly,\",\n  \"supposedly,\",\n  \"surely,\",\n  \"surprisingly,\"\
+      ,\n  \"technically,\",\n  \"thankfully,\",\n  \"then,\",\n  \"theoretically,\"\
+      ,\n  \"thereafter,\",\n  \"thereby,\",\n  \"therefore\",\n  \"third,\",\n  \"\
+      thirdly,\",\n  \"this,\",\n  \"though,\",\n  \"thus,\",\n  \"together,\",\n\
+      \  \"traditionally,\",\n  \"truly,\",\n  \"truthfully,\",\n  \"typically,\"\
+      ,\n  \"ultimately,\",\n  \"undoubtedly,\",\n  \"unfortunately,\",\n  \"unsurprisingly,\"\
+      ,\n  \"usually,\",\n  \"well,\",\n  \"yet,\"\n][label].replace(\"_\",\" \")\n\
+      }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: make_sense
+    reference: ''
+  cf87d7ad-9b78-4ead-9e0e-ae4dc12b91d0: !Template
+    answer_choices: null
+    id: cf87d7ad-9b78-4ead-9e0e-ae4dc12b91d0
+    jinja: "In essay writing, it is important to avoid abrupt sentences. What word\
+      \ would you add to the beginning of the second sentence such that there is a\
+      \ smooth transition from the first sentence?\n\n{{sentence1}}\n\n{{sentence2}}\
+      \ |||\n{{\n[\n  \"no connection\",\n  \"absolutely,\",\n  \"accordingly\",\n\
+      \  \"actually,\",\n  \"additionally\",\n  \"admittedly,\",\n  \"afterward\"\
+      ,\n  \"again,\",\n  \"already,\",\n  \"also,\",\n  \"alternately,\",\n  \"alternatively\"\
+      ,\n  \"although,\",\n  \"altogether,\",\n  \"amazingly,\",\n  \"and\",\n  \"\
+      anyway,\",\n  \"apparently,\",\n  \"arguably,\",\n  \"as_a_result,\",\n  \"\
+      basically,\",\n  \"because_of_that\",\n  \"because_of_this\",\n  \"besides,\"\
+      ,\n  \"but\",\n  \"by_comparison,\",\n  \"by_contrast,\",\n  \"by_doing_this,\"\
+      ,\n  \"by_then\",\n  \"certainly,\",\n  \"clearly,\",\n  \"coincidentally,\"\
+      ,\n  \"collectively,\",\n  \"consequently\",\n  \"conversely\",\n  \"curiously,\"\
+      ,\n  \"currently,\",\n  \"elsewhere,\",\n  \"especially,\",\n  \"essentially,\"\
+      ,\n  \"eventually,\",\n  \"evidently,\",\n  \"finally,\",\n  \"first,\",\n \
+      \ \"firstly,\",\n  \"for_example\",\n  \"for_instance\",\n  \"fortunately,\"\
+      ,\n  \"frankly,\",\n  \"frequently,\",\n  \"further,\",\n  \"furthermore\",\n\
+      \  \"generally,\",\n  \"gradually,\",\n  \"happily,\",\n  \"hence,\",\n  \"\
+      here,\",\n  \"historically,\",\n  \"honestly,\",\n  \"hopefully,\",\n  \"however\"\
+      ,\n  \"ideally,\",\n  \"immediately,\",\n  \"importantly,\",\n  \"in_contrast,\"\
+      ,\n  \"in_fact,\",\n  \"in_other_words\",\n  \"in_particular,\",\n  \"in_short,\"\
+      ,\n  \"in_sum,\",\n  \"in_the_end,\",\n  \"in_the_meantime,\",\n  \"in_turn,\"\
+      ,\n  \"incidentally,\",\n  \"increasingly,\",\n  \"indeed,\",\n  \"inevitably,\"\
+      ,\n  \"initially,\",\n  \"instead,\",\n  \"interestingly,\",\n  \"ironically,\"\
+      ,\n  \"lastly,\",\n  \"lately,\",\n  \"later,\",\n  \"likewise,\",\n  \"locally,\"\
+      ,\n  \"luckily,\",\n  \"maybe,\",\n  \"meaning,\",\n  \"meantime,\",\n  \"meanwhile,\"\
+      ,\n  \"moreover\",\n  \"mostly,\",\n  \"namely,\",\n  \"nationally,\",\n  \"\
+      naturally,\",\n  \"nevertheless\",\n  \"next,\",\n  \"nonetheless\",\n  \"normally,\"\
+      ,\n  \"notably,\",\n  \"now,\",\n  \"obviously,\",\n  \"occasionally,\",\n \
+      \ \"oddly,\",\n  \"often,\",\n  \"on_the_contrary,\",\n  \"on_the_other_hand\"\
+      ,\n  \"once,\",\n  \"only,\",\n  \"optionally,\",\n  \"or,\",\n  \"originally,\"\
+      ,\n  \"otherwise,\",\n  \"overall,\",\n  \"particularly,\",\n  \"perhaps,\"\
+      ,\n  \"personally,\",\n  \"plus,\",\n  \"preferably,\",\n  \"presently,\",\n\
+      \  \"presumably,\",\n  \"previously,\",\n  \"probably,\",\n  \"rather,\",\n\
+      \  \"realistically,\",\n  \"really,\",\n  \"recently,\",\n  \"regardless,\"\
+      ,\n  \"remarkably,\",\n  \"sadly,\",\n  \"second,\",\n  \"secondly,\",\n  \"\
+      separately,\",\n  \"seriously,\",\n  \"significantly,\",\n  \"similarly,\",\n\
+      \  \"simultaneously\",\n  \"slowly,\",\n  \"so,\",\n  \"sometimes,\",\n  \"\
+      soon,\",\n  \"specifically,\",\n  \"still,\",\n  \"strangely,\",\n  \"subsequently,\"\
+      ,\n  \"suddenly,\",\n  \"supposedly,\",\n  \"surely,\",\n  \"surprisingly,\"\
+      ,\n  \"technically,\",\n  \"thankfully,\",\n  \"then,\",\n  \"theoretically,\"\
+      ,\n  \"thereafter,\",\n  \"thereby,\",\n  \"therefore\",\n  \"third,\",\n  \"\
+      thirdly,\",\n  \"this,\",\n  \"though,\",\n  \"thus,\",\n  \"together,\",\n\
+      \  \"traditionally,\",\n  \"truly,\",\n  \"truthfully,\",\n  \"typically,\"\
+      ,\n  \"ultimately,\",\n  \"undoubtedly,\",\n  \"unfortunately,\",\n  \"unsurprisingly,\"\
+      ,\n  \"usually,\",\n  \"well,\",\n  \"yet,\"\n][label].replace(\"_\",\" \")\n\
+      }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: correction
+    reference: ''
+  e5a06323-697a-43e2-953d-f264dd4c2f84: !Template
+    answer_choices: null
+    id: e5a06323-697a-43e2-953d-f264dd4c2f84
+    jinja: "Which is the best discourse marker from the first sentence to the second\
+      \ sentence below?\n\n{{sentence1}}\n\n{{sentence2}} |||\n{{\n[\n  \"no connection\"\
+      ,\n  \"absolutely,\",\n  \"accordingly\",\n  \"actually,\",\n  \"additionally\"\
+      ,\n  \"admittedly,\",\n  \"afterward\",\n  \"again,\",\n  \"already,\",\n  \"\
+      also,\",\n  \"alternately,\",\n  \"alternatively\",\n  \"although,\",\n  \"\
+      altogether,\",\n  \"amazingly,\",\n  \"and\",\n  \"anyway,\",\n  \"apparently,\"\
+      ,\n  \"arguably,\",\n  \"as_a_result,\",\n  \"basically,\",\n  \"because_of_that\"\
+      ,\n  \"because_of_this\",\n  \"besides,\",\n  \"but\",\n  \"by_comparison,\"\
+      ,\n  \"by_contrast,\",\n  \"by_doing_this,\",\n  \"by_then\",\n  \"certainly,\"\
+      ,\n  \"clearly,\",\n  \"coincidentally,\",\n  \"collectively,\",\n  \"consequently\"\
+      ,\n  \"conversely\",\n  \"curiously,\",\n  \"currently,\",\n  \"elsewhere,\"\
+      ,\n  \"especially,\",\n  \"essentially,\",\n  \"eventually,\",\n  \"evidently,\"\
+      ,\n  \"finally,\",\n  \"first,\",\n  \"firstly,\",\n  \"for_example\",\n  \"\
+      for_instance\",\n  \"fortunately,\",\n  \"frankly,\",\n  \"frequently,\",\n\
+      \  \"further,\",\n  \"furthermore\",\n  \"generally,\",\n  \"gradually,\",\n\
+      \  \"happily,\",\n  \"hence,\",\n  \"here,\",\n  \"historically,\",\n  \"honestly,\"\
+      ,\n  \"hopefully,\",\n  \"however\",\n  \"ideally,\",\n  \"immediately,\",\n\
+      \  \"importantly,\",\n  \"in_contrast,\",\n  \"in_fact,\",\n  \"in_other_words\"\
+      ,\n  \"in_particular,\",\n  \"in_short,\",\n  \"in_sum,\",\n  \"in_the_end,\"\
+      ,\n  \"in_the_meantime,\",\n  \"in_turn,\",\n  \"incidentally,\",\n  \"increasingly,\"\
+      ,\n  \"indeed,\",\n  \"inevitably,\",\n  \"initially,\",\n  \"instead,\",\n\
+      \  \"interestingly,\",\n  \"ironically,\",\n  \"lastly,\",\n  \"lately,\",\n\
+      \  \"later,\",\n  \"likewise,\",\n  \"locally,\",\n  \"luckily,\",\n  \"maybe,\"\
+      ,\n  \"meaning,\",\n  \"meantime,\",\n  \"meanwhile,\",\n  \"moreover\",\n \
+      \ \"mostly,\",\n  \"namely,\",\n  \"nationally,\",\n  \"naturally,\",\n  \"\
+      nevertheless\",\n  \"next,\",\n  \"nonetheless\",\n  \"normally,\",\n  \"notably,\"\
+      ,\n  \"now,\",\n  \"obviously,\",\n  \"occasionally,\",\n  \"oddly,\",\n  \"\
+      often,\",\n  \"on_the_contrary,\",\n  \"on_the_other_hand\",\n  \"once,\",\n\
+      \  \"only,\",\n  \"optionally,\",\n  \"or,\",\n  \"originally,\",\n  \"otherwise,\"\
+      ,\n  \"overall,\",\n  \"particularly,\",\n  \"perhaps,\",\n  \"personally,\"\
+      ,\n  \"plus,\",\n  \"preferably,\",\n  \"presently,\",\n  \"presumably,\",\n\
+      \  \"previously,\",\n  \"probably,\",\n  \"rather,\",\n  \"realistically,\"\
+      ,\n  \"really,\",\n  \"recently,\",\n  \"regardless,\",\n  \"remarkably,\",\n\
+      \  \"sadly,\",\n  \"second,\",\n  \"secondly,\",\n  \"separately,\",\n  \"seriously,\"\
+      ,\n  \"significantly,\",\n  \"similarly,\",\n  \"simultaneously\",\n  \"slowly,\"\
+      ,\n  \"so,\",\n  \"sometimes,\",\n  \"soon,\",\n  \"specifically,\",\n  \"still,\"\
+      ,\n  \"strangely,\",\n  \"subsequently,\",\n  \"suddenly,\",\n  \"supposedly,\"\
+      ,\n  \"surely,\",\n  \"surprisingly,\",\n  \"technically,\",\n  \"thankfully,\"\
+      ,\n  \"then,\",\n  \"theoretically,\",\n  \"thereafter,\",\n  \"thereby,\",\n\
+      \  \"therefore\",\n  \"third,\",\n  \"thirdly,\",\n  \"this,\",\n  \"though,\"\
+      ,\n  \"thus,\",\n  \"together,\",\n  \"traditionally,\",\n  \"truly,\",\n  \"\
+      truthfully,\",\n  \"typically,\",\n  \"ultimately,\",\n  \"undoubtedly,\",\n\
+      \  \"unfortunately,\",\n  \"unsurprisingly,\",\n  \"usually,\",\n  \"well,\"\
+      ,\n  \"yet,\"\n][label].replace(\"_\",\" \")\n}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: discourse
+    reference: ''
+  f57ab7ab-bf87-43c7-8eb9-48ea840345f6: !Template
+    answer_choices: null
+    id: f57ab7ab-bf87-43c7-8eb9-48ea840345f6
+    jinja: "What word signifies the shift or extension in meaning from the first to\
+      \ the second sentence? \n\n{{sentence1}}\n\n{{sentence2}} |||\n{{\n[\n  \"no\
+      \ connection\",\n  \"absolutely,\",\n  \"accordingly\",\n  \"actually,\",\n\
+      \  \"additionally\",\n  \"admittedly,\",\n  \"afterward\",\n  \"again,\",\n\
+      \  \"already,\",\n  \"also,\",\n  \"alternately,\",\n  \"alternatively\",\n\
+      \  \"although,\",\n  \"altogether,\",\n  \"amazingly,\",\n  \"and\",\n  \"anyway,\"\
+      ,\n  \"apparently,\",\n  \"arguably,\",\n  \"as_a_result,\",\n  \"basically,\"\
+      ,\n  \"because_of_that\",\n  \"because_of_this\",\n  \"besides,\",\n  \"but\"\
+      ,\n  \"by_comparison,\",\n  \"by_contrast,\",\n  \"by_doing_this,\",\n  \"by_then\"\
+      ,\n  \"certainly,\",\n  \"clearly,\",\n  \"coincidentally,\",\n  \"collectively,\"\
+      ,\n  \"consequently\",\n  \"conversely\",\n  \"curiously,\",\n  \"currently,\"\
+      ,\n  \"elsewhere,\",\n  \"especially,\",\n  \"essentially,\",\n  \"eventually,\"\
+      ,\n  \"evidently,\",\n  \"finally,\",\n  \"first,\",\n  \"firstly,\",\n  \"\
+      for_example\",\n  \"for_instance\",\n  \"fortunately,\",\n  \"frankly,\",\n\
+      \  \"frequently,\",\n  \"further,\",\n  \"furthermore\",\n  \"generally,\",\n\
+      \  \"gradually,\",\n  \"happily,\",\n  \"hence,\",\n  \"here,\",\n  \"historically,\"\
+      ,\n  \"honestly,\",\n  \"hopefully,\",\n  \"however\",\n  \"ideally,\",\n  \"\
+      immediately,\",\n  \"importantly,\",\n  \"in_contrast,\",\n  \"in_fact,\",\n\
+      \  \"in_other_words\",\n  \"in_particular,\",\n  \"in_short,\",\n  \"in_sum,\"\
+      ,\n  \"in_the_end,\",\n  \"in_the_meantime,\",\n  \"in_turn,\",\n  \"incidentally,\"\
+      ,\n  \"increasingly,\",\n  \"indeed,\",\n  \"inevitably,\",\n  \"initially,\"\
+      ,\n  \"instead,\",\n  \"interestingly,\",\n  \"ironically,\",\n  \"lastly,\"\
+      ,\n  \"lately,\",\n  \"later,\",\n  \"likewise,\",\n  \"locally,\",\n  \"luckily,\"\
+      ,\n  \"maybe,\",\n  \"meaning,\",\n  \"meantime,\",\n  \"meanwhile,\",\n  \"\
+      moreover\",\n  \"mostly,\",\n  \"namely,\",\n  \"nationally,\",\n  \"naturally,\"\
+      ,\n  \"nevertheless\",\n  \"next,\",\n  \"nonetheless\",\n  \"normally,\",\n\
+      \  \"notably,\",\n  \"now,\",\n  \"obviously,\",\n  \"occasionally,\",\n  \"\
+      oddly,\",\n  \"often,\",\n  \"on_the_contrary,\",\n  \"on_the_other_hand\",\n\
+      \  \"once,\",\n  \"only,\",\n  \"optionally,\",\n  \"or,\",\n  \"originally,\"\
+      ,\n  \"otherwise,\",\n  \"overall,\",\n  \"particularly,\",\n  \"perhaps,\"\
+      ,\n  \"personally,\",\n  \"plus,\",\n  \"preferably,\",\n  \"presently,\",\n\
+      \  \"presumably,\",\n  \"previously,\",\n  \"probably,\",\n  \"rather,\",\n\
+      \  \"realistically,\",\n  \"really,\",\n  \"recently,\",\n  \"regardless,\"\
+      ,\n  \"remarkably,\",\n  \"sadly,\",\n  \"second,\",\n  \"secondly,\",\n  \"\
+      separately,\",\n  \"seriously,\",\n  \"significantly,\",\n  \"similarly,\",\n\
+      \  \"simultaneously\",\n  \"slowly,\",\n  \"so,\",\n  \"sometimes,\",\n  \"\
+      soon,\",\n  \"specifically,\",\n  \"still,\",\n  \"strangely,\",\n  \"subsequently,\"\
+      ,\n  \"suddenly,\",\n  \"supposedly,\",\n  \"surely,\",\n  \"surprisingly,\"\
+      ,\n  \"technically,\",\n  \"thankfully,\",\n  \"then,\",\n  \"theoretically,\"\
+      ,\n  \"thereafter,\",\n  \"thereby,\",\n  \"therefore\",\n  \"third,\",\n  \"\
+      thirdly,\",\n  \"this,\",\n  \"though,\",\n  \"thus,\",\n  \"together,\",\n\
+      \  \"traditionally,\",\n  \"truly,\",\n  \"truthfully,\",\n  \"typically,\"\
+      ,\n  \"ultimately,\",\n  \"undoubtedly,\",\n  \"unfortunately,\",\n  \"unsurprisingly,\"\
+      ,\n  \"usually,\",\n  \"well,\",\n  \"yet,\"\n][label].replace(\"_\",\" \")\n\
+      }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: transition
+    reference: ''
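Editor's note: all four discovery prompts share one answer mapping: a long Jinja list literal indexed by the integer `label`, followed by `.replace("_", " ")` to turn underscore-joined markers such as `as_a_result,` back into surface text. The same mapping in plain Python, with only the first 20 list entries reproduced and an invented label value:

    MARKERS = [
        "no connection", "absolutely,", "accordingly", "actually,",
        "additionally", "admittedly,", "afterward", "again,", "already,",
        "also,", "alternately,", "alternatively", "although,", "altogether,",
        "amazingly,", "and", "anyway,", "apparently,", "arguably,",
        "as_a_result,",
        # ... continues through "yet," exactly as in the templates ...
    ]

    def marker_for(label: int) -> str:
        # Index the marker list with the label, then undo the underscore encoding.
        return MARKERS[label].replace("_", " ")

    print(marker_for(19))  # -> "as a result,"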
diff --git a/promptsource/templates/docred/templates.yaml b/promptsource/templates/docred/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7370a070870611d304465ab6de008c8e966c5164
--- /dev/null
+++ b/promptsource/templates/docred/templates.yaml
@@ -0,0 +1,188 @@
+dataset: docred
+templates:
+  02af700c-e9e9-4a84-b75c-5fb29a5b7993: !Template
+    answer_choices: null
+    id: 02af700c-e9e9-4a84-b75c-5fb29a5b7993
+    jinja: "Read the following text and answer the questions.\n\nText:\n{% for sent\
+      \ in sents -%}\n{{ sent | join(\" \") }}\n{%- endfor %} \n\nQuestion:\nFrom\
+      \ the above text, find people, locations, organizations, times, numbers, and\
+      \ miscellaneous.\n|||\n{%- set people = [] -%} \n{%- for ners in vertexSet -%}\n\
+      {%- for ner in ners if ner['type'] == 'PER' -%}\n{{people.append(ner['name'])\
+      \ | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if people %}\n\
+      {{\"People: \"}}{{ people | unique | join(\", \")}}{{\".\"}}\n{% endif %}\n\n\
+      {%- set locations = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in ners\
+      \ if ner['type'] == 'LOC' -%}\n{{locations.append(ner['name']) | default(\"\"\
+      , True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if locations %}\n{{\"Locations:\
+      \ \"}}{{ locations | unique | join(\", \")}}{{\".\"}}\n{% endif %}\n\n{%- set\
+      \ orgs = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in ners if ner['type']\
+      \ == 'ORG' -%}\n{{orgs.append(ner['name']) | default(\"\", True)}}\n{%- endfor\
+      \ -%}\n{%- endfor -%}\n{% if orgs %}\n{{\"Organizations: \"}}{{ orgs | unique\
+      \ | join(\", \")}}{{\".\"}}\n{% endif %}\n\n{%- set times = [] -%} \n{%- for\
+      \ ners in vertexSet -%}\n{%- for ner in ners if ner['type'] == 'TIME' -%}\n\
+      {{times.append(ner['name']) | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor\
+      \ -%}\n{% if times %}\n{{\"Times: \"}}{{ times | unique | join(\", \")}}{{\"\
+      .\"}}\n{% endif %}\n\n{%- set numbers = [] -%} \n{%- for ners in vertexSet -%}\n\
+      {%- for ner in ners if ner['type'] == 'NUM' -%}\n{{numbers.append(ner['name'])\
+      \ | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if numbers %}\n\
+      {{\"Numbers: \"}}{{ numbers | unique | join(\", \")}}{{\".\"}}\n{% endif %}\n\
+      \n{%- set miscs = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in ners\
+      \ if ner['type'] == 'MISC' -%}\n{{miscs.append(ner['name']) | default(\"\",\
+      \ True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if miscs %}\n{{\"Miscellaneous:\
+      \ \"}}{{ miscs | unique | join(\", \")}}{{\".\"}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: type-to-entity
+    reference: ''
+  3ab9cfc9-3ba3-41dd-959c-60182def11af: !Template
+    answer_choices: null
+    id: 3ab9cfc9-3ba3-41dd-959c-60182def11af
+    jinja: "Read the following text and answer the questions.\n\nText:\n{% for sent\
+      \ in sents -%}\n{{ sent | join(\" \") }}\n{%- endfor %} \n\nQuestion:\nAssign\
+      \ an entity type to the following entities. The choices are PER (Person), LOC\
+      \ (Location), ORG (Organization), TIME (Time), NUM (Number), and MISC (Miscellaneous).\n\
+      \nExample: \n{%- set names = [] -%}\n{%- set types = [] -%}\n{% for ners in\
+      \ vertexSet[:1] %}\n{% for ner in ners if ner['name'] not in names %}\n{{ names.append(ner['name'])\
+      \ | default(\"\", True) }} \n{{ types.append(ner['type']) | default(\"\", True)\
+      \ }} \n{% endfor %}\n{% endfor %}\n{% for name, type in zip(names, types) %}\n\
+      {{name}}{{\": \"}}{{type}}\n{% endfor %}\n\nNow do the same with below:\n\n\
+      {%- set names = [] -%}\n{%- set types = [] -%}\n{% for ners in vertexSet[1:]\
+      \ %}\n{% for ner in ners if ner['name'] not in names %}\n{{ names.append(ner['name'])\
+      \ | default(\"\", True) }} \n{{ types.append(ner['type']) | default(\"\", True)\
+      \ }} \n{% endfor %}\n{% endfor %}\n{% for name, type in zip(names, types) %}\n\
+      {{name}}{{\": \"}}\n{% endfor %}\n|||\n{% for name, type in zip(names, types)\
+      \ %}\n{{name}}{{\": \"}}{{type}}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: ner-type
+    reference: ''
+  412b482e-185b-48da-8aef-4a93a42e779d: !Template
+    answer_choices: null
+    id: 412b482e-185b-48da-8aef-4a93a42e779d
+    jinja: "{%- set locations = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner\
+      \ in ners if ner['type'] == 'LOC' -%}\n{{locations.append(ner['name']) | default(\"\
+      \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if locations %}\nFind all of\
+      \ the locations in the text below. \n\n{% for sent in sents -%}\n{{ sent | join(\"\
+      \ \") }}\n{%- endfor -%} \n|||\n{{ locations| unique | join(\", \")}}{{\".\"\
+      }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find-all-locations
+    reference: ''
+  5361a8ba-8ced-4417-be21-ba13fa319e9f: !Template
+    answer_choices: null
+    id: 5361a8ba-8ced-4417-be21-ba13fa319e9f
+    jinja: "{%- set organizations = [] -%} \n{%- for ners in vertexSet -%}\n{%- for\
+      \ ner in ners if ner['type'] == 'ORG' -%}\n{{organizations.append(ner['name'])\
+      \ | default(\"\", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if organizations\
+      \ %}\nFind all of the organizations in the text below.\n\n{% for sent in sents\
+      \ -%}\n{{ sent | join(\" \") }}\n{%- endfor -%} \n|||\n{{ organizations| unique\
+      \ | join(\", \")}}{{\".\"}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find-all-organizations
+    reference: ''
+  6efa4d1a-3368-4b12-9e30-588b53801077: !Template
+    answer_choices: null
+    id: 6efa4d1a-3368-4b12-9e30-588b53801077
+    jinja: "{% if labels['relation_text'] %}\nGiven the following entities and relations,\
+      \ write a creative text. The types are PER (Person), LOC (Location), ORG (Organization),\
+      \ TIME (Time), NUM (Number), and MISC (Miscellaneous).\n\n{% for  head, tail,\
+      \ relation in zip(labels['head'], labels['tail'], labels['relation_text']) %}\n\
+      head: {{vertexSet[head][0]['name']}}, tail: {{vertexSet[tail][0]['name']}},\
+      \ relation: {{relation}}\n{% endfor %}\n|||\n{% for sent in sents -%}\n{{ sent\
+      \ | join(\" \") }}\n{%- endfor -%} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: relation-to-text
+    reference: ''
+  7458c4ed-b527-4ad5-8a04-7c87d887d5e6: !Template
+    answer_choices: null
+    id: 7458c4ed-b527-4ad5-8a04-7c87d887d5e6
+    jinja: "Given the following entities and their types, make a creative text. The\
+      \ types are PER (Person), LOC (Location), ORG (Organization), TIME (Time), NUM\
+      \ (Number), and MISC (Miscellaneous).\n\n{%- set names = [] -%}\n{%- set types\
+      \ = [] -%}\n{% for ners in vertexSet %}\n{% for ner in ners if ner['name'] not\
+      \ in names %}\n{{ names.append(ner['name']) | default(\"\", True) }} \n{{ types.append(ner['type'])\
+      \ | default(\"\", True) }} \n{% endfor %}\n{% endfor %}\n{% for name, type in\
+      \ zip(names, types) %}\n{{name}}{{\": \"}}{{type}}\n{% endfor %}\n|||\n{% for\
+      \ sent in sents -%}\n{{ sent | join(\" \") }}\n{%- endfor -%} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ner-to-text
+    reference: ''
+  7f6bb96c-3661-4369-8d75-6eca07f15e6d: !Template
+    answer_choices: null
+    id: 7f6bb96c-3661-4369-8d75-6eca07f15e6d
+    jinja: "{%- set times = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in\
+      \ ners if ner['type'] == 'TIME' -%}\n{{times.append(ner['name']) | default(\"\
+      \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if times %}\nFind all of the\
+      \ times in the text below. \n\n{% for sent in sents -%}\n{{ sent | join(\" \"\
+      ) }}\n{%- endfor -%} \n|||\n{{ times| unique | join(\", \")}}{{\".\"}}\n{% endif\
+      \ %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find-all-times
+    reference: ''
+  9ca601e9-bf97-4fba-90c9-ca502247d034: !Template
+    answer_choices: null
+    id: 9ca601e9-bf97-4fba-90c9-ca502247d034
+    jinja: "{%- set people = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner in\
+      \ ners if ner['type'] == 'PER' -%}\n{{people.append(ner['name']) | default(\"\
+      \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if people %}\nFind all of the\
+      \ people in the text below. \n\n{% for sent in sents -%}\n{{ sent | join(\"\
+      \ \") }}\n{%- endfor -%} \n|||\n{{ people| unique | join(\", \")}}{{\".\"}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find-all-people
+    reference: ''
+  9effc9d0-bf50-4dbb-9813-02a021e8da33: !Template
+    answer_choices: null
+    id: 9effc9d0-bf50-4dbb-9813-02a021e8da33
+    jinja: "{% if labels['relation_text'] %}\n\nRead the following text and answer\
+      \ the questions.\n\nText:\n{%- for sent in sents -%}\n{{ sent | join(\" \")\
+      \ }}\n{%- endfor -%} \n\nQuestion: Find the named entities above and their relationships.\n\
+      \n{{\"For example, \"}}\n\n{% for  head, tail, relation in zip(labels['head'][:1],\
+      \ labels['tail'][:1], labels['relation_text'][:1]) -%}\nhead: {{vertexSet[head][0]['name']}},\
+      \ tail: {{vertexSet[tail][0]['name']}}, relation: {{relation}}\n{%- endfor -%}\n\
+      {{\".\"}}\n|||\n{% for  head, tail, relation in zip(labels['head'][1:], labels['tail'][1:],\
+      \ labels['relation_text'][1:]) %}\nhead: {{vertexSet[head][0]['name']}}, tail:\
+      \ {{vertexSet[tail][0]['name']}}, relation: {{relation}}\n{% endfor %}\n{% endif\
+      \ %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: relation
+    reference: ''
+  a31dc527-a5b9-4411-a600-ea2bbe22a9d3: !Template
+    answer_choices: null
+    id: a31dc527-a5b9-4411-a600-ea2bbe22a9d3
+    jinja: "{%- set numbers = [] -%} \n{%- for ners in vertexSet -%}\n{%- for ner\
+      \ in ners if ner['type'] == 'NUM' -%}\n{{numbers.append(ner['name']) | default(\"\
+      \", True)}}\n{%- endfor -%}\n{%- endfor -%}\n{% if numbers %}\nFind all of the\
+      \ numbers in the text below. Please do not include years.\n\n{% for sent in\
+      \ sents -%}\n{{ sent | join(\" \") }}\n{%- endfor -%} \n|||\n{{ numbers| unique\
+      \ | join(\", \")}}{{\".\"}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find-all-numbers
+    reference: ''
diff --git a/promptsource/templates/dream/templates.yaml b/promptsource/templates/dream/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..79a02983f20a5b5bed48a8d932516d13d7f7f4e0
--- /dev/null
+++ b/promptsource/templates/dream/templates.yaml
@@ -0,0 +1,86 @@
+dataset: dream
+templates:
+  024906f3-2503-451f-a0ce-2c9faf90e6c5: !Template
+    answer_choices: null
+    id: 024906f3-2503-451f-a0ce-2c9faf90e6c5
+    jinja: 'Read the conversation below.
+
+
+      {{dialogue[:-1] | join("\n\n")}}
+
+
+      What would the listener say?
+
+      |||
+
+      {{dialogue[-1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate-last-utterance
+    reference: ''
+  5c53fe97-b8b9-4c91-bd75-b3f8e056bd01: !Template
+    answer_choices: null
+    id: 5c53fe97-b8b9-4c91-bd75-b3f8e056bd01
+    jinja: 'Given the question "{{question}}" and the answer "{{answer}}", write a
+      conversation that might have happened.
+
+      |||
+
+      {{dialogue | join("\n\n")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: answer-to-dialogue
+    reference: ''
+  70865a35-1db3-45bc-8b08-baf1d9d0be9d: !Template
+    answer_choices: null
+    id: 70865a35-1db3-45bc-8b08-baf1d9d0be9d
+    jinja: '{{dialogue[1:] | join("\n\n")}}
+
+
+      What was said before this conversation?
+
+      |||
+
+      {{dialogue[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate-first-utterance
+    reference: ''
+  8f962580-1611-4982-b567-05939c5012ff: !Template
+    answer_choices: '{{choice | join("|||")}}'
+    id: 8f962580-1611-4982-b567-05939c5012ff
+    jinja: "Dialogue:\n\n{{dialogue | join(\"\\n\\n\")}}\n\nQuestion: {{question}}\
+      \ \n\n- {{answer_choices[0]}}\n\n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n\
+      |||\n{{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: baseline
+    reference: https://dataset.org/dream/
+  d4687975-664d-46ac-b13b-482a35a61ab3: !Template
+    answer_choices: '{{choice | join("|||")}}'
+    id: d4687975-664d-46ac-b13b-482a35a61ab3
+    jinja: "Read the following conversation and answer the question.\n\n{{dialogue\
+      \ | join(\"\\n\\n\")}}\n\nQuestion: {{question}} \n\n- {{answer_choices[0]}}\n\
+      \n- {{answer_choices[1]}}\n\n- {{answer_choices[2]}}\n|||\n{{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: read_the_following_conversation_and_answer_the_question
+    reference: ''
diff --git a/promptsource/templates/drop/templates.yaml b/promptsource/templates/drop/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b43e2f9041e9b8909bc19e63ba2ec36e887e376e
--- /dev/null
+++ b/promptsource/templates/drop/templates.yaml
@@ -0,0 +1,96 @@
+dataset: drop
+templates:
+  350e0c24-b10c-4156-9053-a0b2d4af4214: !Template
+    answer_choices: null
+    id: 350e0c24-b10c-4156-9053-a0b2d4af4214
+    jinja: 'Question: {{question}}
+
+      Answer based on the following passage.
+
+
+      {{passage}}
+
+
+      Answer:
+
+      ||| {{ answers_spans.spans | join(", ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question context answer
+    reference: Reading Comprehension with KB
+  79c0d600-8d49-4628-b1c1-d472fb762fa2: !Template
+    answer_choices: null
+    id: 79c0d600-8d49-4628-b1c1-d472fb762fa2
+    jinja: "I am trying to figure out the answer to the question, \"{{question}}\"\
+      \ I found the following text-snippet has the answer. Can you tell me the answer?\n\
+      \n{{passage}} \n\n||| {{ answers_spans.spans | join(\", \") }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: can you tell me
+    reference: Reading Comprehension with KB
+  ab58cc42-a558-4709-8a73-30194fcf9fa2: !Template
+    answer_choices: null
+    id: ab58cc42-a558-4709-8a73-30194fcf9fa2
+    jinja: 'Passage: {{passage}}
+
+      Question: {{question}}
+
+      Answer: ||| {{ answers_spans.spans | join(", ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: DROP GPT3
+    reference: Prompt format from GPT3 - Table G20
+  ad649b92-59ad-44a9-b328-7bbab49b104f: !Template
+    answer_choices: null
+    id: ad649b92-59ad-44a9-b328-7bbab49b104f
+    jinja: 'Generate a question from the following passage that has the answer: {{
+      answers_spans.spans | join(", ") }}
+
+      Passage: {{passage}}
+
+      Question:
+
+      |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question_with_passage_and_answer
+    reference: ''
+  e9bba528-7782-4f2b-a431-7601f8258628: !Template
+    answer_choices: null
+    id: e9bba528-7782-4f2b-a431-7601f8258628
+    jinja: 'Context: {{passage}}
+
+      I am trying to figure out the answer to the question from the above context. Can
+      you tell me the answer?
+
+      Question: {{question}}
+
+      Answer:
+
+      ||| {{ answers_spans.spans | join(", ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: context question answer
+    reference: Reading Comprehension with KB
diff --git a/promptsource/templates/duorc/ParaphraseRC/templates.yaml b/promptsource/templates/duorc/ParaphraseRC/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..016919e3817954ac8c7a3673614c06a1591297d9
--- /dev/null
+++ b/promptsource/templates/duorc/ParaphraseRC/templates.yaml
@@ -0,0 +1,223 @@
+dataset: duorc
+subset: ParaphraseRC
+templates:
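+  # The `{{ answers | choice }}` expressions below assume a `choice` filter
+  # (random.choice) registered in the templating environment, sampling one
+  # reference answer per render; the boolean `no_answer` field gates each
+  # template so unanswerable questions yield a refusal string or are skipped.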
+  09adcadd-fa7b-4154-91cb-fe822bf8e00e: !Template
+    answer_choices: null
+    id: 09adcadd-fa7b-4154-91cb-fe822bf8e00e
+    jinja: '{% if no_answer == false %}
+
+      Build a movie plot around this: {{ question }} {{answers|choice}}
+
+      |||
+
+      {{ plot }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: build_story_around_qa
+    reference: Given the question-answer pair, generate a relevant plot.
+  0c7049c0-750a-46b7-af38-dd1e9fcb5217: !Template
+    answer_choices: null
+    id: 0c7049c0-750a-46b7-af38-dd1e9fcb5217
+    jinja: 'I am trying to decide whether it''s worth it to invest in this film proposal.
+      Can you help me answer a few questions? If you can''t, please say "{{"No I can''t"}}".
+
+
+      Question: {{question}}
+
+      Movie title: {{title}}
+
+      Movie plot: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      No I can''t
+
+      {% else %}
+
+      {{answers|choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: decide_worth_it
+    reference: ''
+  594d0551-d737-4680-a7a5-8393acc6dbb7: !Template
+    answer_choices: null
+    id: 594d0551-d737-4680-a7a5-8393acc6dbb7
+    jinja: 'Question: {{question}}
+
+      If there is no answer, please output "{{"Insufficient information to provide
+      an answer."}}".
+
+      Movie title: {{title}}
+
+      Context: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      Insufficient information to provide an answer.
+
+      {% else %}
+
+      {{answers|choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question_answering
+    reference: Given a passage and a question, generate an answer.
+  805f121a-6bd4-4803-9428-ea733f385add: !Template
+    answer_choices: null
+    id: 805f121a-6bd4-4803-9428-ea733f385add
+    jinja: 'I am a movie director and I just received the following movie plot. Could
+      you help me answer this question? If not, let me know by writing "{{"Not answerable"}}".
+
+
+      Plot title: {{title}}
+
+      Movie plot: {{plot}}
+
+      My question: {{question}}
+
+      |||
+
+      {% if no_answer %}
+
+      Not answerable
+
+      {% else %}
+
+      {{answers|choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: movie_director
+    reference: ''
+  842e346b-2d26-43a2-9a3a-9154f04eb76a: !Template
+    answer_choices: null
+    id: 842e346b-2d26-43a2-9a3a-9154f04eb76a
+    jinja: 'Generate a question about the following movie plot: {{ plot }}
+
+      |||
+
+      {{ question }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: Given the plot, generate a question.
+  945053f7-6ad3-4c08-b7dd-5413564f7467: !Template
+    answer_choices: null
+    id: 945053f7-6ad3-4c08-b7dd-5413564f7467
+    jinja: 'Extract the answer to the following question from the movie plot. If the
+      question isn''t answerable, please output "{{"Can''t answer"}}".
+
+      Question: {{question}}
+
+      Title: {{title}}
+
+      Movie plot: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      Can''t answer
+
+      {% else %}
+
+      {{answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: extract_answer
+    reference: ''
+  a8597645-cfed-4f54-ba0d-c23eaafaa131: !Template
+    answer_choices: null
+    id: a8597645-cfed-4f54-ba0d-c23eaafaa131
+    jinja: 'Suggest a movie title for the following movie plot: {{plot}}
+
+      |||
+
+      {{title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: title_generation
+    reference: Given the plot for the movie, suggest a title.
+  c6f75398-a993-44d6-a494-78961a7dc1b7: !Template
+    answer_choices: null
+    id: c6f75398-a993-44d6-a494-78961a7dc1b7
+    jinja: 'Please answer the following question about this movie plot. If it''s unanswerable,
+      please output "{{"No answer"}}".
+
+
+      Question: {{question}}
+
+      Movie plot title: {{title}}
+
+      Movie plot: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      No answer
+
+      {% else %}
+
+      {{answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: answer_question
+    reference: ''
+  ce3649d8-fd90-4a77-8819-4eb20b1c83a9: !Template
+    answer_choices: null
+    id: ce3649d8-fd90-4a77-8819-4eb20b1c83a9
+    jinja: "{% if no_answer == false%}\nGenerate a question that has the following\
+      \ answer: \n{{answers|choice}} \nfor the following movie plot: \n{{plot}}\n\
+      |||\n{{question}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question_by_answer
+    reference: Given the passage and the answer, generate a question which has that
+      answer.
diff --git a/promptsource/templates/duorc/SelfRC/templates.yaml b/promptsource/templates/duorc/SelfRC/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..198279547a21f6f927e08092f5ad1a404e58811f
--- /dev/null
+++ b/promptsource/templates/duorc/SelfRC/templates.yaml
@@ -0,0 +1,223 @@
+dataset: duorc
+subset: SelfRC
+templates:
+  1f544641-ba15-44ef-bfcd-c951d320eb9a: !Template
+    answer_choices: null
+    id: 1f544641-ba15-44ef-bfcd-c951d320eb9a
+    jinja: "{% if no_answer == false%}\nGenerate a question that has the following\
+      \ answer: \n{{answers|choice}} \nfor the following movie plot: \n{{plot}}\n\
+      |||\n{{question}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question_by_answer
+    reference: Given the passage and the answer, generate a question which has that
+      answer.
+  289254d0-e382-4c9d-9638-984c01fe7391: !Template
+    answer_choices: null
+    id: 289254d0-e382-4c9d-9638-984c01fe7391
+    jinja: 'I am a movie director and I just received the following movie plot. Could
+      you help me answer this question? If not, let me know by writing "{{"Not answerable"}}".
+
+
+      Plot title: {{title}}
+
+      Movie plot: {{plot}}
+
+      My question: {{question}}
+
+      |||
+
+      {% if no_answer %}
+
+      Not answerable
+
+      {% else %}
+
+      {{answers|choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: movie_director
+    reference: ''
+  606e9fc0-d07d-45e6-a828-b786fd3a10da: !Template
+    answer_choices: null
+    id: 606e9fc0-d07d-45e6-a828-b786fd3a10da
+    jinja: 'Extract the answer to the following question from the movie plot. If the
+      question isn''t answerable, please output "{{"Can''t answer"}}".
+
+      Question: {{question}}
+
+      Title: {{title}}
+
+      Movie plot: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      Can''t answer
+
+      {% else %}
+
+      {{answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: extract_answer
+    reference: ''
+  af62f222-a8d2-439f-9586-52e0279d25cc: !Template
+    answer_choices: null
+    id: af62f222-a8d2-439f-9586-52e0279d25cc
+    jinja: 'Generate a question about the following movie plot: {{ plot }}
+
+      |||
+
+      {{ question }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: Given the plot, generate a question.
+  c1829c38-eae3-49a9-a047-f89316f58140: !Template
+    answer_choices: null
+    id: c1829c38-eae3-49a9-a047-f89316f58140
+    jinja: 'Please answer the following question about this movie plot. If it''s unanswerable,
+      please output "{{"No answer"}}".
+
+
+      Question: {{question}}
+
+      Movie plot title: {{title}}
+
+      Movie plot: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      No answer
+
+      {% else %}
+
+      {{answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: answer_question
+    reference: ''
+  c76b38f1-b47a-4354-960d-58d2f0974d14: !Template
+    answer_choices: null
+    id: c76b38f1-b47a-4354-960d-58d2f0974d14
+    jinja: '{% if no_answer == false %}
+
+      Build a movie plot around this: {{ question }} {{answers|choice}}
+
+      |||
+
+      {{ plot }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: build_story_around_qa
+    reference: Given the question-answer pair, generate a relevant plot.
+  c917a801-28fe-4c78-93d8-8e43897aa613: !Template
+    answer_choices: null
+    id: c917a801-28fe-4c78-93d8-8e43897aa613
+    jinja: 'Question: {{question}}
+
+      If there is no answer, please output "{{"Insufficient information to provide
+      an answer."}}".
+
+      Movie title: {{title}}
+
+      Context: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      Insufficient information to provide an answer.
+
+      {% else %}
+
+      {{answers|choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question_answering
+    reference: Given a passage and a question, generate an answer.
+  d486ac96-de6b-403a-8628-5adb23252194: !Template
+    answer_choices: null
+    id: d486ac96-de6b-403a-8628-5adb23252194
+    jinja: 'Suggest a movie title for the following movie plot: {{plot}}
+
+      |||
+
+      {{title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: title_generation
+    reference: Given the plot for the movie, suggest a title.
+  f64279e3-dc9b-4480-9aa6-72d9d1ca2287: !Template
+    answer_choices: null
+    id: f64279e3-dc9b-4480-9aa6-72d9d1ca2287
+    jinja: 'I am trying to decide whether it''s worth it to invest in this film proposal.
+      Can you help me answer a few questions? If you can''t, please say "{{"No I can''t"}}".
+
+
+      Question: {{question}}
+
+      Movie title: {{title}}
+
+      Movie plot: {{plot}}
+
+      |||
+
+      {% if no_answer %}
+
+      No I can''t
+
+      {% else %}
+
+      {{answers|choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: decide_worth_it
+    reference: ''
diff --git a/promptsource/templates/e2e_nlg_cleaned/templates.yaml b/promptsource/templates/e2e_nlg_cleaned/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5122de2b5530491ce3957a6992d7564f383815c9
--- /dev/null
+++ b/promptsource/templates/e2e_nlg_cleaned/templates.yaml
@@ -0,0 +1,301 @@
+dataset: e2e_nlg_cleaned
+templates:
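+  # `meaning_representation` is assumed to be a flat string such as
+  # `name[Alimentum], area[city centre], familyFriendly[no]`, hence the
+  # splitting on "]". The `{% set vars = {...} %}` plus
+  # `{% set temp = vars.update({...}) %}` pattern mutates a dict to carry a
+  # value out of a for loop, because a plain `set` inside a Jinja2 loop cannot
+  # rebind an outer variable; assigning update()'s None return to `temp`
+  # performs the mutation without rendering anything.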
+  0f54b6e2-42c0-45ec-8ea2-2e6204388f76: !Template
+    answer_choices: null
+    id: 0f54b6e2-42c0-45ec-8ea2-2e6204388f76
+    jinja: 'Combine all of the following data into a concise and grammatically correct
+      sentence:
+
+      {% for feature in meaning_representation.split("]") %}
+
+      {% set key = feature.split("[")[0].replace(",","") %}
+
+      {% set value = feature.replace(",","").replace(key+"[", '''''''') %}
+
+      {% if value != "" %}
+
+      {{key}} : {{value}}
+
+      {% endif %}
+
+      {%- endfor %}
+
+      ||| {{human_reference}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: e2e_basic_3
+    reference: ''
+  14db0e7a-f7d1-4bd0-bfb3-f611be608c4a: !Template
+    answer_choices: null
+    id: 14db0e7a-f7d1-4bd0-bfb3-f611be608c4a
+    jinja: '{% set vars = {''key'':"eatType", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} What type of eatery is the passage talking about?  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: eat_type_1
+    reference: ''
+  18f74817-9f8c-4fd5-bc00-6e0016a40dcc: !Template
+    answer_choices: null
+    id: 18f74817-9f8c-4fd5-bc00-6e0016a40dcc
+    jinja: '{% set vars = {''key'':"food", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} From the passage given above, what type of food do you think
+      is served at this eatery?  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: food_1
+    reference: ''
+  1acabbc3-c9b9-4624-a684-29faeccff46f: !Template
+    answer_choices: null
+    id: 1acabbc3-c9b9-4624-a684-29faeccff46f
+    jinja: 'Given the following data about an eatery:
+
+      {% for feature in meaning_representation.split("]") %}
+
+      {% set key = feature.split("[")[0].replace(",","") %}
+
+      {% set value = feature.replace(",","").replace(key+"[", '''''''') %}
+
+      {% if value != "" %}
+
+      {{key}} : {{value}}
+
+      {% endif %}
+
+      {%- endfor %}
+
+      Generate a sentence about this eatery. ||| {{human_reference}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: e2e_basic_2
+    reference: ''
+  418c7942-06e8-48a7-b5f4-9f15bb874edf: !Template
+    answer_choices: null
+    id: 418c7942-06e8-48a7-b5f4-9f15bb874edf
+    jinja: '{% set vars = {''key'':"area", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} From the passage given above, where is the location of the
+      eatery?  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: area_1
+    reference: ''
+  51666217-46cf-4950-bf63-108ed16e074c: !Template
+    answer_choices: null
+    id: 51666217-46cf-4950-bf63-108ed16e074c
+    jinja: '{% set vars = {''key'':"familyFriendly", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} Is the eatery from the passage family-friendly?  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: family_1
+    reference: ''
+  6e5f3eff-fab1-4c33-a296-5ac662754e87: !Template
+    answer_choices: null
+    id: 6e5f3eff-fab1-4c33-a296-5ac662754e87
+    jinja: '{% set vars = {''key'':"near", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} Name the landmark that is close to this eatery according
+      to the passage.  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: near_1
+    reference: ''
+  711bcf63-be82-4937-bdef-0c379d20bb74: !Template
+    answer_choices: null
+    id: 711bcf63-be82-4937-bdef-0c379d20bb74
+    jinja: 'How would we create a sentence out of the following data: {{meaning_representation}}?
+
+      ||| {{human_reference}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: e2e_basic_4
+    reference: ''
+  83992c17-745f-4940-b626-b01a85ba66c1: !Template
+    answer_choices: null
+    id: 83992c17-745f-4940-b626-b01a85ba66c1
+    jinja: '{% set vars = {''key'':"name", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} From the passage given above, what is the name of the eatery?  |||
+      {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: name_1
+    reference: ''
+  b67da63d-f220-4b9f-ae82-b4addf0c7573: !Template
+    answer_choices: null
+    id: b67da63d-f220-4b9f-ae82-b4addf0c7573
+    jinja: '{% set vars = {''key'':"customer rating", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} According to this passage, what is the rating given to this
+      eatery?  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: rating_1
+    reference: ''
+  bdecbb5a-d3e8-46f3-9ea8-22025bc59e3b: !Template
+    answer_choices: null
+    id: bdecbb5a-d3e8-46f3-9ea8-22025bc59e3b
+    jinja: 'Given the following data: {{meaning_representation}} Generate a sentence
+      about this eatery. ||| {{human_reference}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: e2e_basic_1
+    reference: ''
+  f9089a30-1c6c-4a43-a5eb-586b1dcd72f2: !Template
+    answer_choices: null
+    id: f9089a30-1c6c-4a43-a5eb-586b1dcd72f2
+    jinja: '{% set vars = {''key'':"priceRange", ''value'':""} %}
+
+      {% for feature in meaning_representation.split("]") if vars[''key'']  in feature
+      %}
+
+      {% set temp = vars.update({''value'':feature.replace(",","").replace(vars[''key'']+"[",
+      '''')}) %}
+
+      {%- endfor %}
+
+      {% if vars["value"]|length > 0 %}
+
+      {{human_reference}} According to the passage, how much would one have to pay
+      to eat here?  ||| {{vars[''value'']}}
+
+      {% endif %}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: price_range_1
+    reference: ''
diff --git a/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml b/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4f9fbdd780e5b21108996e6e0710aa08d7b243c2
--- /dev/null
+++ b/promptsource/templates/ecthr_cases/alleged-violation-prediction/templates.yaml
@@ -0,0 +1,133 @@
+dataset: ecthr_cases
+subset: alleged-violation-prediction
+templates:
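+  # Targets below rely on the idiom
+  # `{{ {1:"one",2:"two",3:"three",4:"several"}[[4,labels | length] | min] }}`:
+  # `[4,labels | length] | min` caps the number of allegedly violated articles
+  # at 4, and the dict literal maps that count to a word.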
+  32404ed1-2276-401f-bb93-2937d9919585: !Template
+    answer_choices: null
+    id: 32404ed1-2276-401f-bb93-2937d9919585
+    jinja: '{{facts | join("\n")}}
+
+
+      These facts show a potential violation of articles of the European Convention
+      on Human Rights. The number of violated articles is
+
+      |||
+
+      {{{1:"one",2:"two",3:"three",4:"four or more"}[[4,labels | length] | min]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: implicit_advice_number
+    reference: implicitly ask for number of violated articles (rather than quantity)
+  3e5ba238-98ad-4d25-b84f-f226158ef8d6: !Template
+    answer_choices: null
+    id: 3e5ba238-98ad-4d25-b84f-f226158ef8d6
+    jinja: "The following facts relate to a claim brought before the European Court\
+      \ of Human Rights (ECtHR). \n{{facts | join(\"\\n\")}}\n\nQuestion: How many\
+      \ substantive articles in the European Convention on Human Rights could have\
+      \ been breached on these facts? If more than three substantive articles are\
+      \ breached, answer \"{{'several'}}\".\nAnswer:\n|||\n{{{1:\"one\",2:\"two\"\
+      ,3:\"three\",4:\"several\"}[[4,labels | length] | min]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ecthr_alleged_articles_declaration_at_end
+    reference: Explicitly ask question at end
+  7a56f5cc-20b5-4543-bb20-9c616d3f36dc: !Template
+    answer_choices: null
+    id: 7a56f5cc-20b5-4543-bb20-9c616d3f36dc
+    jinja: 'Question: Have {{"one"}}, {{"two"}}, {{"three"}}, or {{"several"}} articles
+      of the ECHR been violated on these facts?
+
+      {{facts | join("\n")}}
+
+
+      Answer:
+
+      |||
+
+      {{{1:"one",2:"two",3:"three",4:"several"}[[4,labels | length] | min]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: ecthr_alleged_articles_question_at_start
+    reference: Explicitly ask question at start of prompt
+  96fb3903-c1e4-4752-8b05-5e8c1c12370a: !Template
+    answer_choices: null
+    id: 96fb3903-c1e4-4752-8b05-5e8c1c12370a
+    jinja: '{{facts | join("\n")}}
+
+
+      In ruling on this case, I have needed to carefully consider the submissions
+      of the parties. The plaintiffs allege breaches of the European Convention on
+      Human Rights; the number of articles allegedly breached is
+
+      |||
+
+      {{{1:"one",2:"two",3:"three",4:"more than three"}[[4,labels | length] | min]}}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: implicit_judgment_paragraph
+    reference: implicitly asking for quantity as part of a judgment
+  b4fb5e6e-5e91-4f39-82ba-45dba7b71aad: !Template
+    answer_choices: null
+    id: b4fb5e6e-5e91-4f39-82ba-45dba7b71aad
+    jinja: '{{facts | join("\n")}}
+
+
+      Question: Is it true that the facts in this case indicate that more than two articles
+      of the European Convention on Human Rights were allegedly violated? Answer "yes"
+      or "no".
+
+      Answer:
+
+      |||
+
+      {{{1:"no",2:"no",3:"yes",4:"yes"}[[4,labels | length] | min]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: confirm number of violated articles
+    reference: ask for yes/no confirmation of the number of violated articles
+  e3e9046e-c631-4cf3-b1d3-98c08d88e62e: !Template
+    answer_choices: null
+    id: e3e9046e-c631-4cf3-b1d3-98c08d88e62e
+    jinja: 'The following is an extract of facts from a judgment handed down by the
+      European Court of Human Rights.
+
+
+      {{facts[:10] | join("\n")}}
+
+
+      {% if silver_rationales | length > 0 %}
+
+      Additionally, the court cited the following facts elsewhere in the decision:
+
+      {% for loc in silver_rationales[:10] %}
+
+      {{facts[loc]}}
+
+      {% endfor %}
+
+      {% endif %}
+
+
+      Question: Have {{"one"}}, {{"two"}}, {{"three"}}, or {{"several"}} articles
+      of the ECHR been violated on these facts?
+
+      Answer:
+
+      |||
+
+      {{{1:"one",2:"two",3:"three",4:"several"}[[4,labels | length] | min]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: silver_rationales
+    reference: ''
diff --git a/promptsource/templates/emo/templates.yaml b/promptsource/templates/emo/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e6997752de8e83ab96035fd435c09891ffbb7527
--- /dev/null
+++ b/promptsource/templates/emo/templates.yaml
@@ -0,0 +1,198 @@
+dataset: emo
+templates:
+  2603600f-2d49-40f5-a8c6-05c9b38eab0c: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: 2603600f-2d49-40f5-a8c6-05c9b38eab0c
+    jinja: "Person A says something, Person B responds, and then Person A says something.\
+      \ Here's their conversation: \n\n\"{{text}}\"\n\nGiven the context and the last\
+      \ message, how would you best describe Person A's emotion - {{\"happy\"}}, {{\"\
+      sad\"}}, {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices\
+      \ [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: persons_describe
+    reference: ''
+  4b078a4e-1a04-4401-a65e-a3d30c7d16ad: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: 4b078a4e-1a04-4401-a65e-a3d30c7d16ad
+    jinja: "Consider this textual dialogue of 3 messages between 2 participants who\
+      \ took turns to talk: \n\n\"{{text}}\"\"\n\nWhat would you say is the underlying\
+      \ emotion of the final message in this dialogue? {{\"happy\"}}, {{\"sad\"}},\
+      \ {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices [label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: final_message
+    reference: ''
+  6699f3ed-cb6b-4b85-a266-cb5311bb6018: !Template
+    answer_choices: another emotion ||| happiness ||| sadness ||| anger
+    id: 6699f3ed-cb6b-4b85-a266-cb5311bb6018
+    jinja: '"{{text}}"
+
+
+      What emotion do you think was felt by the speaker of the last utterance of this
+      dialogue? {{answer_choices[1]}}, {{answer_choices[2]}}, {{answer_choices[3]}}
+      or {{answer_choices[0]}}
+
+
+      |||
+
+
+      {{answer_choices[label]}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: what_emotion_do_you_think
+    reference: ''
+  70f652fe-8c2b-42f4-ac1b-2026d040d80e: !Template
+    answer_choices: another state ||| happy ||| sad ||| angry
+    id: 70f652fe-8c2b-42f4-ac1b-2026d040d80e
+    jinja: "Consider this short dialogue:\n\n{{text}}\n\nHow would you describe the\
+      \ emotional state of the last person to speak? {{answer_choices[1]}}, {{answer_choices[2]}},\
+      \ {{answer_choices[3]}} or {{answer_choices[0]}} \n\n|||\n\n{{answer_choices[label]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: emotional_state
+    reference: ''
+  8cd68ed9-60d6-4e01-a961-e1af07263646: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: 8cd68ed9-60d6-4e01-a961-e1af07263646
+    jinja: 'Dialogue between speakers A and B:
+
+
+      {{text}}
+
+
+      How would you say A feels in the last message? Is it {{answer_choices[1]}},
+      {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}}? ||| {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: dialogue_between
+    reference: ''
+  99f6f2b9-ed9f-42f7-b0bc-249cead1a82f: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: 99f6f2b9-ed9f-42f7-b0bc-249cead1a82f
+    jinja: "Person A says something, Person B responds, and then Person A says something.\
+      \ Here's their conversation: \n\n\"{{text}}\"\n\nWhat emotion can be inferred\
+      \ from the context and Person A's last message? {{\"happy\"}}, {{\"sad\"}},\
+      \ {{\"angry\"}}, or  {{\"something else\"}}?\n\n|||\n\n{{ answer_choices [label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: persons_infer
+    reference: ''
+  9a87023c-176a-4031-9ef5-92a775ca9d83: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: 9a87023c-176a-4031-9ef5-92a775ca9d83
+    jinja: "Consider this textual dialogue of 3 messages between 2 participants who\
+      \ took turns to talk: \n\n\"{{text}}\"\n\nGiven the context, what emotion can\
+      \ you detect in the message from the person who spoke last? {{\"happy\"}}, {{\"\
+      sad\"}}, {{\"angry\"}}, or {{\"something else\"}}?\n\n|||\n\n{{ answer_choices\
+      \ [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: spoke_last
+    reference: ''
+  f5c9ceea-40c8-4837-a631-b40b1d30f015: !Template
+    answer_choices: another feeling ||| happiness ||| sadness ||| anger
+    id: f5c9ceea-40c8-4837-a631-b40b1d30f015
+    jinja: '"{{text}}"
+
+
+      In this dialogue, what emotion was felt by the first speaker in the final answer?
+      {{answer_choices[1]}}, {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}}
+
+
+      |||
+
+
+      {{answer_choices[label]}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: feel_when_last_answer
+    reference: ''
+  f686cef0-6174-466d-b87d-672aaf5f9caa: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: f686cef0-6174-466d-b87d-672aaf5f9caa
+    jinja: '"{{text}}"
+
+      This was a dialogue exchange between 2 people who took turns to talk. Given
+      the context, which of the following options best describes the emotion that
+      the last speaker is feeling? {{"happy"}}, {{"sad"}}. {{"angry"}}, or {{"something
+      else"}}?
+
+
+      |||
+
+
+      {{ answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: feeling
+    reference: ''
+  fcb82da7-3d47-4763-a8e5-84e559afeddf: !Template
+    answer_choices: something else ||| happy ||| sad ||| angry
+    id: fcb82da7-3d47-4763-a8e5-84e559afeddf
+    jinja: 'In the dialogue below, would you say the first speaker feels {{answer_choices[1]}},
+      {{answer_choices[2]}}, {{answer_choices[3]}} or {{answer_choices[0]}} when the
+      last sentence is uttered?
+
+      {{text}}
+
+
+      |||
+
+
+      {{answer_choices[label]}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: in_this_dialogue
+    reference: ''
diff --git a/promptsource/templates/emotion/templates.yaml b/promptsource/templates/emotion/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..80c065a7b5d0d4975a44479c891f5a2a0279857b
--- /dev/null
+++ b/promptsource/templates/emotion/templates.yaml
@@ -0,0 +1,69 @@
+dataset: emotion
+templates:
+  2da087fe-8cca-4f92-b19c-babccb26a510: !Template
+    answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+    id: 2da087fe-8cca-4f92-b19c-babccb26a510
+    jinja: '{{text}} The emotion expressed in the message is ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_3
+    reference: emotion is
+  5fbc4f16-4f7f-4c82-b35d-6e68eced0c70: !Template
+    answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+    id: 5fbc4f16-4f7f-4c82-b35d-6e68eced0c70
+    jinja: '{{text}} What is the emotion expressed in this message? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_2
+    reference: what is present
+  7bbb7b75-47ca-4bc6-8537-5a3be683172c: !Template
+    answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+    id: 7bbb7b75-47ca-4bc6-8537-5a3be683172c
+    jinja: The following message expresses what emotion? {{text}} ||| {{ answer_choices
+      [label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_0
+    reference: direct basic emotions
+  a2c026ba-2b54-451b-84a0-b302f37bbabe: !Template
+    answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+    id: a2c026ba-2b54-451b-84a0-b302f37bbabe
+    jinja: '{{text}} How would you feel about the message? ||| {{ answer_choices [label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_1
+    reference: how you feel
+  b88c0f70-0362-499b-b42e-da9bd84e553c: !Template
+    answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+    id: b88c0f70-0362-499b-b42e-da9bd84e553c
+    jinja: "'Given the message: {{text}} \n\nOut of the options, {{\"sadness\"}},\
+      \ {{\"joy\"}}, {{\"love\"}}, {{\"anger\"}}, {{\"fear\"}} and {{\"surprise\"\
+      }}, \n\nthe emotion in the message is  ||| {{ answer_choices [label] }}'"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_5
+    reference: out of six emotions
+  ef04c109-9b8d-4ea3-b8f5-646ec235c568: !Template
+    answer_choices: sadness ||| joy ||| love ||| anger ||| fear ||| surprise
+    id: ef04c109-9b8d-4ea3-b8f5-646ec235c568
+    jinja: '{{text}} What emotion does the writer express for the message? ||| {{
+      answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_4
+    reference: emotion of the writer
diff --git a/promptsource/templates/esnli/templates.yaml b/promptsource/templates/esnli/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..52f92123eb90fa7d57173f05fa5566ebd39eda34
--- /dev/null
+++ b/promptsource/templates/esnli/templates.yaml
@@ -0,0 +1,170 @@
+dataset: esnli
+templates:
+  16206cff-dd01-408e-b0b4-daabb750b38b: !Template
+    answer_choices: null
+    id: 16206cff-dd01-408e-b0b4-daabb750b38b
+    jinja: 'Explain why the relation between the following two sentences can be described
+      as {{ ["an entailment", "neutral", "a contradiction"][label] }}.
+
+
+      Sentence 1: {{premise}}
+
+
+      Sentence 2: {{hypothesis}}
+
+      |||
+
+      {{explanation_1}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: explain_3
+    reference: ''
+  3d077f02-bba0-4019-bacd-bf3e7119b1f7: !Template
+    answer_choices: null
+    id: 3d077f02-bba0-4019-bacd-bf3e7119b1f7
+    jinja: "{% if label == 0 %} \nWhy does the first sentence entail the second?\n\
+      {% elif label == 1 %}\nWhy do the two sentences neither entail nor contradict\
+      \ each other?\n{% else %}\nWhy do these two sentences contradict each other?\n\
+      {% endif%}\n\nFirst sentence: \n{{premise}} \n\nSecond sentence:\n{{hypothesis}}\n\
+      |||\n{{explanation_1}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: explain_4
+    reference: ''
+  3e84960e-99db-42bf-9370-50858a92318c: !Template
+    answer_choices: null
+    id: 3e84960e-99db-42bf-9370-50858a92318c
+    jinja: "Given than:\n{{ premise }}\n{% if label == 0 %} \nWhy is it always true\
+      \ that:\n{% elif label == 1 %}\nWhy it cannot be concluded that:\n{% else %}\n\
+      Why is it necessarily false that:\n{% endif%}\n{{ hypothesis }}\n|||\n{{ explanation_1\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: explain_1
+    reference: ''
+  643d3444-99f8-4a02-8d76-12a9b719edea: !Template
+    answer_choices: entails ||| neutral ||| contradicts
+    id: 643d3444-99f8-4a02-8d76-12a9b719edea
+    jinja: 'First sentence: {{ premise }}
+
+
+      Second sentence: {{ hypothesis }}
+
+
+      Determine whether the first sentence {{ "entails" }}, {{ "contradicts" }} or
+      is {{ "neutral" }} with regard to the second.
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: class_2_reverse
+    reference: ''
+  70728515-9e3e-4b04-b57c-ee1b30716547: !Template
+    answer_choices: entailment ||| neutral ||| contradiction
+    id: 70728515-9e3e-4b04-b57c-ee1b30716547
+    jinja: 'Premise: {{ premise }}
+
+
+      Hypothesis: {{ hypothesis }}
+
+
+      Name the relation between the premise and the hypothesis above. Select the correct
+      option: {{ "entailment" }}, {{ "contradiction" }} or {{ "neutral" }}.
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: class_1_reverse
+    reference: ''
+  89b174c7-f4d6-442c-8ac2-10c51595770e: !Template
+    answer_choices: entails ||| neutral ||| contradicts
+    id: 89b174c7-f4d6-442c-8ac2-10c51595770e
+    jinja: 'Determine whether the first sentence {{ "entails" }}, {{ "contradicts"
+      }} or is {{ "neutral" }} with regard to the second.
+
+
+      First sentence: {{ premise }}
+
+
+      Second sentence: {{ hypothesis }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: class_2
+    reference: ''
+  c102663b-3472-42b5-b633-71f7abd6a457: !Template
+    answer_choices: entailment ||| neutral ||| contradiction
+    id: c102663b-3472-42b5-b633-71f7abd6a457
+    jinja: 'Describe the relation between the following two sentences. The choices
+      are {{ "entailment" }}, {{ "contradiction" }} and {{ "neutral" }}.
+
+
+      First sentence: {{premise}}
+
+
+      Second sentence: {{hypothesis}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: class_1
+    reference: ''
+  c6cce628-8e69-418b-8676-deae7a782e7f: !Template
+    answer_choices: Yes ||| No ||| No
+    id: c6cce628-8e69-418b-8676-deae7a782e7f
+    jinja: "Does this statement: \n\n{{ premise }} \n\nimply that: \n\n{{ hypothesis\
+      \ }}?\n|||\n{{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: entail_1
+    reference: ''
+  ef034633-d4d9-47b8-9152-b025b1d61e5b: !Template
+    answer_choices: No ||| No ||| Yes
+    id: ef034633-d4d9-47b8-9152-b025b1d61e5b
+    jinja: "First statement: \n{{ premise }}\n\nSecond statement: \n{{ hypothesis\
+      \ }}\n\nDo the statements above contradict each other?\n|||\n{{ answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: contradict_1
+    reference: ''
+  f64d6196-370b-4501-acb5-e11a5ebf0c5e: !Template
+    answer_choices: null
+    id: f64d6196-370b-4501-acb5-e11a5ebf0c5e
+    jinja: "If we know that:\n{{premise}}\n{% if label == 0 %} \nWhy is it always\
+      \ the case that:\n{% elif label == 1 %}\nWhy is it not necessarily the case\
+      \ that:\n{% else %}\nWhy is it not true that:\n{% endif%}\n{{hypothesis}}\n\
+      |||\n{{explanation_1}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: explain_2
+    reference: ''
diff --git a/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml b/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cbfe6fa30a863e094a0a50ae0cf6bfa8263ead1c
--- /dev/null
+++ b/promptsource/templates/evidence_infer_treatment/1.1/templates.yaml
@@ -0,0 +1,127 @@
+dataset: evidence_infer_treatment
+subset: '1.1'
+templates:
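+  # The `sub_sub_annotation` pop/append sequence below samples a random
+  # annotation index inside the template: `pop() | replace(0, "")` and
+  # `append(range(...) | choice) | replace(None, "")` blank out the "0" and
+  # "None" these expressions would otherwise render, leaving the sampled
+  # index as the list's only element.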
+  1b538c15-d7b7-4139-8755-fb7d28c19a4d: !Template
+    answer_choices: null
+    id: 1b538c15-d7b7-4139-8755-fb7d28c19a4d
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      {{Text[:1200]}} \n\n{{Text[-300:]}}\n\nThe text above contains important details\
+      \ for answering the following questions:\n\nThe relevant annotations:\n\n{{\
+      \ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+      \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nNow, on the basis of the annotation and the text, the outcome is:\n\n{% endif %}\n\
+      \n|||\n\n\n{{Prompts.Outcome[specific_sub_annotation]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_4
+    reference: ''
+  7ce46648-2bcc-4e67-95f5-c2a0d0612f9b: !Template
+    answer_choices: null
+    id: 7ce46648-2bcc-4e67-95f5-c2a0d0612f9b
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      {{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+      \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\nAfter reading\
+      \ the following text:\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nThe relevant\
+      \ annotations:\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nNow if the comparator is:\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\
+      \nand the intervention is:\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n\
+      \nThe outcome is:\n\n{% endif %}\n\n|||\n\n{{Prompts.Outcome[specific_sub_annotation]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_2
+    reference: ''
+  7d618260-32fb-405d-ab79-cec67f589de7: !Template
+    answer_choices: null
+    id: 7d618260-32fb-405d-ab79-cec67f589de7
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      Read the following text:\n\n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n\
+      {{ sub_sub_annotation.append(range(0, sub_annotation_length) | choice) | replace(None,\
+      \ \"\") }}\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nNow the comparator is:\n\
+      \n{{Prompts.Comparator[specific_sub_annotation]}}.\n\nThe intervention is:\n\
+      \n{{Prompts.Intervention[specific_sub_annotation]}}.\n\nThe outcome:\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\
+      \nis either {{\"significantly increased\"}}, {{\"significantly decreased\"}}\
+      \ or {{\"no significant difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\
+      \n{% if sub_annotation_length > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+      \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_3
+    reference: ''
+  c999469a-20e0-4c10-a707-3c057d5c0245: !Template
+    answer_choices: null
+    id: c999469a-20e0-4c10-a707-3c057d5c0245
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      The following text snippets contain important information:\n\n{{Text[:1200]}}\
+      \ \n\n{{Text[-300:]}}\n\nThe relevant annotations are:\n\n{{ sub_sub_annotation.pop()\
+      \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+      \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nNow if the comparator is:\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\
+      \nThe intervention will be:\n\n{% endif %}\n\n|||\n\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_1
+    reference: ''
+  da67a99f-0472-4658-a410-afe260749d90: !Template
+    answer_choices: null
+    id: da67a99f-0472-4658-a410-afe260749d90
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      The information required to understand the outcome is below:\n\n{{Text[:1200]}}\
+      \ \n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\n{{ sub_sub_annotation.pop()\
+      \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+      \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nThe comparator is:\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\n\
+      The intervention is:\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n\
+      \nThe outcome:\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\nis either\
+      \ {{\"significantly increased\"}}, {{\"significantly decreased\"}} or {{\"no\
+      \ significant difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\n{% if sub_annotation_length\
+      \ > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+      \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_with_all_info
+    reference: Template with the task definition
+  fbf5600f-5e70-4c15-9608-f53cec32825f: !Template
+    answer_choices: null
+    id: fbf5600f-5e70-4c15-9608-f53cec32825f
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      The first text snippet that is important to understand is:\n\n{{Text[:1200]}}\
+      \ \n\nThe second text snippet is:\n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\
+      \n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+      \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nThe intervention is:\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n\
+      \nThe outcome:\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\nThe comparator\
+      \ is:\n\n{% endif %}\n\n|||\n\n{{Prompts.Comparator[specific_sub_annotation]}}."
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_5
+    reference: ''
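The sub_sub_annotation = [0] / pop / append dance in the templates above is a
workaround for Jinja's block scoping: a {% set %} inside the {% if %} block would
not be visible outside it, so the templates instead mutate a one-element list in
place, piping the return values of pop() and append() through the replace filter
so they print as empty strings (jinja2's namespace() object is the modern
alternative). Note that choice is not a jinja2 built-in filter; promptsource
registers random.choice under that name on its environment. A standalone sketch
of the idiom, assuming jinja2:

    import random

    import jinja2

    env = jinja2.Environment()
    env.filters["choice"] = random.choice  # as promptsource's environment does

    template = env.from_string(
        "{% set idx = [0] %}"
        # pop() returns the placeholder 0; replace renders it as an empty string.
        "{{ idx.pop() | replace(0, '') }}"
        # append() returns None; replace silences that too. The list now holds
        # one random index that later expressions can read back as idx[0].
        "{{ idx.append(range(0, 4) | choice) | replace(None, '') }}"
        "picked index {{ idx[0] }}"
    )
    print(template.render())  # e.g. "picked index 2"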
diff --git a/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml b/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23c211392150c1771fb706d963e6e3884fdb910c
--- /dev/null
+++ b/promptsource/templates/evidence_infer_treatment/2.0/templates.yaml
@@ -0,0 +1,127 @@
+dataset: evidence_infer_treatment
+subset: '2.0'
+templates:
+  500b12ee-0b19-4fe5-b799-2746b2906993: !Template
+    answer_choices: null
+    id: 500b12ee-0b19-4fe5-b799-2746b2906993
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      {{Text[:1200]}} \n\n{{Text[-300:]}}\n\nThe text above contains important details\
+      \ for answering the following questions:\n\nThe relevant annotations:\n\n{{\
+      \ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+      \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nNow, on the basis of the annotation and the text, the outcome is:\n\n{% endif %}\n\
+      \n|||\n\n\n{{Prompts.Outcome[specific_sub_annotation]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_4
+    reference: ''
+  55659a3c-4fce-42dd-a925-df6242fc84fa: !Template
+    answer_choices: null
+    id: 55659a3c-4fce-42dd-a925-df6242fc84fa
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      The following text snippets contain important information:\n\n{{Text[:1200]}}\
+      \ \n\n{{Text[-300:]}}\n\nThe relevant annotations are:\n\n{{ sub_sub_annotation.pop()\
+      \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+      \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nNow if the comparator is:\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\
+      \nThe intervention will be:\n\n{% endif %}\n\n|||\n\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_1
+    reference: ''
+  613e3e1b-2646-4a55-8356-584386a8f0b8: !Template
+    answer_choices: null
+    id: 613e3e1b-2646-4a55-8356-584386a8f0b8
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      The first text snippet that is important to understand is:\n\n{{Text[:1200]}}\
+      \ \n\nThe second text snippet is:\n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\
+      \n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+      \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nThe intervention is:\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n\
+      \nThe outcome:\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\nThe comparator\
+      \ is:\n\n{% endif %}\n\n|||\n\n{{Prompts.Comparator[specific_sub_annotation]}}."
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_5
+    reference: ''
+  97de966b-c753-4856-b5ff-a69d8002e369: !Template
+    answer_choices: null
+    id: 97de966b-c753-4856-b5ff-a69d8002e369
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      Read the following text:\n\n{{ sub_sub_annotation.pop() | replace(0, \"\") }}\n\
+      {{ sub_sub_annotation.append(range(0, sub_annotation_length) | choice) | replace(None,\
+      \ \"\") }}\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nNow the comparator is:\n\
+      \n{{Prompts.Comparator[specific_sub_annotation]}}.\n\nThe intervention is:\n\
+      \n{{Prompts.Intervention[specific_sub_annotation]}}.\n\nThe outcome:\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\
+      \nis either {{\"significantly increased\"}}, {{\"significantly decreased\"}}\
+      \ or {{\"no significant difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\
+      \n{% if sub_annotation_length > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+      \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_3
+    reference: ''
+  dbdf04d6-7447-48c8-8239-9a0e634bf444: !Template
+    answer_choices: null
+    id: dbdf04d6-7447-48c8-8239-9a0e634bf444
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      The information required to understand the outcome is below:\n\n{{Text[:1200]}}\
+      \ \n\n{{Text[-300:]}}\n\nThe relevant annotations:\n\n{{ sub_sub_annotation.pop()\
+      \ | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0, sub_annotation_length)\
+      \ | choice) | replace(None, \"\") }}\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nThe comparator is:\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\n\
+      The intervention is:\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n\
+      \nThe outcome:\n\n{{Prompts.Outcome[specific_sub_annotation]}}\n\nis either\
+      \ {{\"significantly increased\"}}, {{\"significantly decreased\"}} or {{\"no\
+      \ significant difference\"}}. Which is it?\n\n{% endif %}\n\n|||\n\n{% if sub_annotation_length\
+      \ > 0 %}\n\n{{Prompts.Annotations[specific_sub_annotation].Label[sub_sub_annotation[0]]}}\n\
+      \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_with_all_info
+    reference: Template with the task definition
+  f923b2a5-3a12-4104-900a-1b5343bb6017: !Template
+    answer_choices: null
+    id: f923b2a5-3a12-4104-900a-1b5343bb6017
+    jinja: "{% set annotation_length = Prompts.Annotations | length %}\n\n{% set specific_sub_annotation\
+      \ = range(0, annotation_length) | choice %}\n\n{% set sub_annotation_length\
+      \ = Prompts.Annotations[specific_sub_annotation].Annotations | length %}\n\n\
+      {% set sub_sub_annotation = [0] %}\n\n{% if sub_annotation_length > 0 %}\n\n\
+      {{ sub_sub_annotation.pop() | replace(0, \"\") }}\n{{ sub_sub_annotation.append(range(0,\
+      \ sub_annotation_length) | choice) | replace(None, \"\") }}\n\nAfter reading\
+      \ the following text:\n\n{{Text[:1200]}} \n\n{{Text[-300:]}}\n\nThe relevant\
+      \ annotations:\n\n{{Prompts.Annotations[specific_sub_annotation].Annotations[sub_sub_annotation[0]]}}\n\
+      \nNow if the comparator is:\n\n{{Prompts.Comparator[specific_sub_annotation]}}.\n\
+      \nand the intervention is:\n\n{{Prompts.Intervention[specific_sub_annotation]}}.\n\
+      \nThe outcome is:\n\n{% endif %}\n\n|||\n\n{{Prompts.Outcome[specific_sub_annotation]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_2
+    reference: ''
diff --git a/promptsource/templates/fever/v1.0/templates.yaml b/promptsource/templates/fever/v1.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9e00e9f1a81c268adf6126d19094ed52e36cd875
--- /dev/null
+++ b/promptsource/templates/fever/v1.0/templates.yaml
@@ -0,0 +1,58 @@
+dataset: fever
+subset: v1.0
+templates:
+  0870481e-e5d1-43a1-821e-b11c6bfd2483: !Template
+    answer_choices: null
+    id: 0870481e-e5d1-43a1-821e-b11c6bfd2483
+    jinja: "{{claim}} Is this true?\n|||\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\"\
+      : \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_postprompt
+    reference: CBQA fever, prompt after claim
+  17967f69-187f-4c98-9c32-624736e04412: !Template
+    answer_choices: null
+    id: 17967f69-187f-4c98-9c32-624736e04412
+    jinja: "I've heard that {{claim}} Is this correct?\n|||\n{{\n{\"SUPPORTS\": \"\
+      Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_dialog_style_surrounded
+    reference: CBQA fever, like a conversation, with prompts surrounding claim
+  6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdf: !Template
+    answer_choices: null
+    id: 6cc8f145-3fb4-43a9-aaf1-8c25dd6e2cdf
+    jinja: "Is this statement correct? {{claim}} ||| \n{{\n{\"SUPPORTS\": \"Yes\"\
+      ,\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_preprompt
+    reference: Closed-book QA from only the claim, prompt before the content
+  948f41ab-e6bb-4de6-af3e-7f0b5d5f39a8: !Template
+    answer_choices: null
+    id: 948f41ab-e6bb-4de6-af3e-7f0b5d5f39a8
+    jinja: "\"{{claim}}\" Yes or no?\n|||\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\"\
+      : \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_short
+    reference: CBQA fever, minimal
+  b888ac7f-7482-4b5b-b94d-1ee096eefee5: !Template
+    answer_choices: null
+    id: b888ac7f-7482-4b5b-b94d-1ee096eefee5
+    jinja: "\"{{claim}}\", I have heard. Correct?\n|||\n{{\n{\"SUPPORTS\": \"Yes\"\
+      ,\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_dialog_style_postprompt
+    reference: CBQA fever, like a conversation, prompt after output
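The fever templates verbalize the label with an inline Jinja dict literal indexed
by the raw string label, with "NOT ENOUGH INFO" mapped to an empty target. A
minimal sketch, assuming jinja2 and that label arrives as the raw string, as
these templates presuppose:

    import jinja2

    template = jinja2.Environment().from_string(
        '{{claim}} Is this true?\n'
        '|||\n'
        '{{ {"SUPPORTS": "Yes", "REFUTES": "No", "NOT ENOUGH INFO": ""}[label] }}'
    )
    # "NOT ENOUGH INFO" examples render an empty target string.
    print(template.render(claim="Water boils at 100C at sea level.", label="SUPPORTS"))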
diff --git a/promptsource/templates/fever/v2.0/templates.yaml b/promptsource/templates/fever/v2.0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12e1cf7cf68a6f5b928d400cdd2120c7f9701f13
--- /dev/null
+++ b/promptsource/templates/fever/v2.0/templates.yaml
@@ -0,0 +1,58 @@
+dataset: fever
+subset: v2.0
+templates:
+  43acc4a7-262f-4c7c-9774-3e1e06376c52: !Template
+    answer_choices: null
+    id: 43acc4a7-262f-4c7c-9774-3e1e06376c52
+    jinja: "I've heard that {{claim}} Is this correct?\n|||\n{{\n{\"SUPPORTS\": \"\
+      Yes\",\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_dialog_style_surrounded
+    reference: CBQA fever, like a conversation, with prompts surrounding claim
+  6d109e17-3fc0-4cad-bc97-1ffb2c82d1de: !Template
+    answer_choices: null
+    id: 6d109e17-3fc0-4cad-bc97-1ffb2c82d1de
+    jinja: "{{claim}} Is this true?\n|||\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\"\
+      : \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_postprompt
+    reference: CBQA fever, prompt after claim
+  6e43e0de-988b-45d1-b43d-0ac2c6b396fc: !Template
+    answer_choices: null
+    id: 6e43e0de-988b-45d1-b43d-0ac2c6b396fc
+    jinja: "\"{{claim}}\", I have heard. Correct?\n|||\n{{\n{\"SUPPORTS\": \"Yes\"\
+      ,\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_dialog_style_postprompt
+    reference: CBQA fever, like a conversation, prompt after output
+  a5a3f123-0390-4221-b481-83f1165eabda: !Template
+    answer_choices: null
+    id: a5a3f123-0390-4221-b481-83f1165eabda
+    jinja: "Is this statement correct? {{claim}} ||| \n{{\n{\"SUPPORTS\": \"Yes\"\
+      ,\n \"REFUTES\": \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_preprompt
+    reference: Closed-book QA from only the claim, prompt before the content
+  febc986d-7651-4f0c-bc42-ec22efc76b2c: !Template
+    answer_choices: null
+    id: febc986d-7651-4f0c-bc42-ec22efc76b2c
+    jinja: "\"{{claim}}\" Yes or no?\n|||\n{{\n{\"SUPPORTS\": \"Yes\",\n \"REFUTES\"\
+      : \"No\",\n\"NOT ENOUGH INFO\": \"\"\n}[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa_fever_short
+    reference: CBQA fever, minimal
diff --git a/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml b/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30c60615123ce358bf6f49712b350c227771856a
--- /dev/null
+++ b/promptsource/templates/financial_phrasebank/sentences_allagree/templates.yaml
@@ -0,0 +1,143 @@
+dataset: financial_phrasebank
+subset: sentences_allagree
+templates:
+  06719321-62e7-4f6e-8f95-464cd2b5ca5c: !Template
+    answer_choices: negative ||| neutral ||| positive
+    id: 06719321-62e7-4f6e-8f95-464cd2b5ca5c
+    jinja: 'What effect, {{"negative"}}, {{"neutral"}}, or {{"positive"}}, does this
+      news have on the related share prices?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: share_price_option
+    reference: ''
+  0beba048-f949-4034-83b6-a3e0e7363f46: !Template
+    answer_choices: negative ||| neutral ||| positive
+    id: 0beba048-f949-4034-83b6-a3e0e7363f46
+    jinja: 'What is the sentiment of the sentence?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentiment
+    reference: ''
+  1af36463-8ed9-4574-9157-f029960e1d5e: !Template
+    answer_choices: growth ||| neutral ||| decline
+    id: 1af36463-8ed9-4574-9157-f029960e1d5e
+    jinja: 'What among {{"growth"}}, {{"neutral"}}, {{"decline"}}, does the following
+      argument mean for the local economy?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: local_economy_option
+    reference: ''
+  461efe04-6883-41e8-80f0-e722a75260fe: !Template
+    answer_choices: growth ||| neutral ||| decline
+    id: 461efe04-6883-41e8-80f0-e722a75260fe
+    jinja: 'What does the following argument mean for the complementary industries?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: complementary_industries
+    reference: ''
+  5fa16d31-b513-480d-bd1b-1fa8c182fb76: !Template
+    answer_choices: bearish ||| neutral ||| bullish
+    id: 5fa16d31-b513-480d-bd1b-1fa8c182fb76
+    jinja: 'Should an investor be {{"bullish, neutral, or bearish"}} given the following
+      detail?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: bull_bear
+    reference: ''
+  612be728-b6c8-4636-80b6-8aaa7593a2e1: !Template
+    answer_choices: negative ||| neutral ||| positive
+    id: 612be728-b6c8-4636-80b6-8aaa7593a2e1
+    jinja: 'What is the sentiment, {{"negative"}}, {{"neutral"}}, or {{"positive"}},
+      of the sentence?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentiment_option
+    reference: ''
+  b7364738-258d-4b81-b322-b9876b6fd93d: !Template
+    answer_choices: growth ||| neutral ||| decline
+    id: b7364738-258d-4b81-b322-b9876b6fd93d
+    jinja: 'Which of {{"growth"}}, {{"neutral"}}, or {{"decline"}} does the following
+      argument mean for the complementary industries?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: complementary_industries_option
+    reference: ''
+  b85d62c9-a34a-4da2-836e-a0aadbe48291: !Template
+    answer_choices: negative ||| neutral ||| positive
+    id: b85d62c9-a34a-4da2-836e-a0aadbe48291
+    jinja: 'What is the effect of this news on the related share prices?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: share_price
+    reference: ''
+  e0650133-befc-4aeb-92e1-2f8d6a0200b3: !Template
+    answer_choices: growth ||| neutral ||| decline
+    id: e0650133-befc-4aeb-92e1-2f8d6a0200b3
+    jinja: 'What does the following argument mean for the local economy?
+
+
+      {{sentence}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: local_economy
+    reference: ''
diff --git a/promptsource/templates/freebase_qa/templates.yaml b/promptsource/templates/freebase_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9078a6fc32d6e689118d4a240c56ff03b04bcf99
--- /dev/null
+++ b/promptsource/templates/freebase_qa/templates.yaml
@@ -0,0 +1,80 @@
+dataset: freebase_qa
+templates:
+  02b12d5c-a481-494d-84ea-a210eefa66d2: !Template
+    answer_choices: null
+    id: 02b12d5c-a481-494d-84ea-a210eefa66d2
+    jinja: "{% set question_context = Parses.TopicEntityName | choice %}\n{% set inference_context\
+      \ = Parses.InferentialChain | first %}\n\nThe topic of this question is: {{question_context.split(\"\
+      \ \") | map(\"capitalize\") | join(\" \")}}.\n\nThe answer to this question\
+      \ should be in the following category: {{ inference_context.split(\".\") | last\
+      \ | capitalize | replace(\"_\", \" \")}}\n\nUsing this, answer the following\
+      \ question:\n\n{{RawQuestion}}\n||| \n{% set answer = Parses.Answers | choice\
+      \ %}\n{{answer.AnswersName[0][0].split(\" \") | map(\"capitalize\") | join(\"\
+      \ \") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: qa_context_2
+    reference: qa prompt with topic and inference chain provided
+  1d583b71-7ef1-49df-b252-e8e1d6910129: !Template
+    answer_choices: null
+    id: 1d583b71-7ef1-49df-b252-e8e1d6910129
+    jinja: 'What category best describes the answer to the following question?
+
+
+      Question: {{RawQuestion}}
+
+      |||
+
+      {% set answer = Parses.InferentialChain | first %}
+
+      {{ answer.split(".") | last | capitalize | replace("_", " ")}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: inference_chain_prompt
+    reference: predicting the inference chain given just the question
+  1fd7e73c-92ac-4e33-be33-80775cbb14df: !Template
+    answer_choices: null
+    id: 1fd7e73c-92ac-4e33-be33-80775cbb14df
+    jinja: "Answer the following question:\n\n{{RawQuestion}}\n||| \n{% set answer\
+      \ = Parses.Answers | choice %}\n{{answer.AnswersName[0][0].split(\" \") | map(\"\
+      capitalize\") | join(\" \") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: qa_template_basic
+    reference: basic question/answer format
+  30ff02f4-3673-4ea6-a3e0-0df0cf19b021: !Template
+    answer_choices: null
+    id: 30ff02f4-3673-4ea6-a3e0-0df0cf19b021
+    jinja: "{% set context = Parses.TopicEntityName | choice %}\nThe topic of this\
+      \ question is: {{context.split(\" \") | map(\"capitalize\") | join(\" \")}}.\n\
+      \nWith that context, answer the following question:\n\n{{RawQuestion}}\n|||\
+      \ \n{% set answer = Parses.Answers | choice %}\n{{answer.AnswersName[0][0].split(\"\
+      \ \") | map(\"capitalize\") | join(\" \") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: qa_context_1
+    reference: qa question with simple entity context
+  dbf762f0-2daa-4cc4-af67-ba72aa2c1991: !Template
+    answer_choices: null
+    id: dbf762f0-2daa-4cc4-af67-ba72aa2c1991
+    jinja: "{% set answer = Parses.Answers | choice %}\nFor the following question:\n\
+      \n\"{{RawQuestion}}\" \n\nWhat word or phrase best describes its answer, \"\
+      {{answer.AnswersName[0][0].split(\" \") | map(\"capitalize\") | join(\" \")\
+      \ }}\"? \n||| \n{% set a = Parses.InferentialChain | first %}\n{{ a.split(\"\
+      .\") | last | capitalize | replace(\"_\", \" \")}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: inference_chain_prompt_context
+    reference: determine the inference chain between question and answer
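The freebase_qa templates lean on filter pipelines: split(" ") | map("capitalize")
| join(" ") title-cases an entity name word by word, first takes the first parse,
and choice (again the promptsource-registered random filter) samples one. A
sketch against a hypothetical record shaped like the dataset's Parses field,
assuming jinja2:

    import random

    import jinja2

    env = jinja2.Environment()
    env.filters["choice"] = random.choice  # as promptsource's environment does

    template = env.from_string(
        '{% set answer = Parses.Answers | choice %}'
        '{{ answer.AnswersName[0][0].split(" ") | map("capitalize") | join(" ") }}'
    )
    # Hypothetical example record; the real feature is richer.
    parses = {"Answers": [{"AnswersName": [["tom hanks"]]}]}
    print(template.render(Parses=parses))  # -> "Tom Hanks"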
diff --git a/promptsource/templates/generated_reviews_enth/templates.yaml b/promptsource/templates/generated_reviews_enth/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2ed509c75a247e4fc797da3e15d73f8d56412478
--- /dev/null
+++ b/promptsource/templates/generated_reviews_enth/templates.yaml
@@ -0,0 +1,57 @@
+dataset: generated_reviews_enth
+templates:
+  7f158fb6-bbdd-41b8-bed7-21508c9f3c80: !Template
+    answer_choices: no ||| yes
+    id: 7f158fb6-bbdd-41b8-bed7-21508c9f3c80
+    jinja: Does "{{translation.en}}" seem like a positive review to you? ||| {{answer_choices[0
+      if review_star<3 else 1]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: sentiment_analysis_4
+    reference: stsb_multi_mt_en
+  95136948-3402-4bd4-8a69-1aa7b85461cc: !Template
+    answer_choices: null
+    id: 95136948-3402-4bd4-8a69-1aa7b85461cc
+    jinja: 'Rate the positivity of this review: ({{"1"}} being the lowest and {{"5"}}
+      the highest) "{{translation.en}}" ||| {{review_star}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: sentiment_analysis_5
+    reference: stsb_multi_mt
+  ad12212f-a230-4750-a199-9791628856c4: !Template
+    answer_choices: null
+    id: ad12212f-a230-4750-a199-9791628856c4
+    jinja: "How positive is the review \"{{translation.en}}\"? Give a score between\n\
+      \      {{\"0\"}} and {{\"5\"}}. ||| {{review_star}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: sentiment_analysis_1
+    reference: stsb_multi_mt_en
+  cf8f4dcb-f527-4944-b9ec-a1a3e476c13f: !Template
+    answer_choices: null
+    id: cf8f4dcb-f527-4944-b9ec-a1a3e476c13f
+    jinja: On a scale from {{"1"}} to {{"5"}}, how positive is the review "{{translation.en}}"?
+      ||| {{review_star}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: sentiment_analysis_3
+    reference: stsb_multi_mt_en
+  e6c55d56-23d4-41a4-9908-e9366cc2e167: !Template
+    answer_choices: no ||| yes
+    id: e6c55d56-23d4-41a4-9908-e9366cc2e167
+    jinja: Do you think "{{translation.en}}" is a positive review? ||| {{answer_choices[0
+      if review_star < 3 else 1]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: sentiment_analysis_2
+    reference: stsb_multi_mt_en
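The yes/no sentiment templates above binarize the 1-to-5 review_star rating by
indexing answer_choices with a Jinja conditional expression. A minimal sketch,
assuming jinja2 and the "no ||| yes" choices already split into a list:

    import jinja2

    template = jinja2.Environment().from_string(
        'Do you think "{{translation.en}}" is a positive review? '
        '||| {{ answer_choices[0 if review_star < 3 else 1] }}'
    )
    print(template.render(
        translation={"en": "Great product!"},
        review_star=5,
        answer_choices=["no", "yes"],
    ))  # the target after ||| is "yes"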
diff --git a/promptsource/templates/gigaword/templates.yaml b/promptsource/templates/gigaword/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a3fbe7e51893c9b3be48017d46f970cb471f5e7
--- /dev/null
+++ b/promptsource/templates/gigaword/templates.yaml
@@ -0,0 +1,140 @@
+dataset: gigaword
+templates:
+  0a45ae54-4585-4d13-9540-890125d614e0: !Template
+    answer_choices: null
+    id: 0a45ae54-4585-4d13-9540-890125d614e0
+    jinja: '{{document}}
+
+
+      ===
+
+
+      Generate a title for this article: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: generate_summary_for_this
+    reference: ''
+  2558932f-894a-41ef-be34-32a5afb1f5d8: !Template
+    answer_choices: null
+    id: 2558932f-894a-41ef-be34-32a5afb1f5d8
+    jinja: 'Title: {{summary}}
+
+
+      ||| {{document}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: reverse_writing
+    reference: ''
+  696e561e-1311-4a3e-9ca1-51d1fd77392b: !Template
+    answer_choices: null
+    id: 696e561e-1311-4a3e-9ca1-51d1fd77392b
+    jinja: 'Make a title for this article: {{document}} |||
+
+
+      {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: make_a_title
+    reference: ''
+  7ad1a48a-195d-4c0c-aea5-df0689589f27: !Template
+    answer_choices: null
+    id: 7ad1a48a-195d-4c0c-aea5-df0689589f27
+    jinja: 'First sentence of the article: {{document}}
+
+
+      Title: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: first_sentence_title
+    reference: ''
+  90c34acf-2f42-4e66-98dc-7453f7e60e60: !Template
+    answer_choices: null
+    id: 90c34acf-2f42-4e66-98dc-7453f7e60e60
+    jinja: '{{document}}
+
+
+      TL;DR: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: TLDR
+    reference: GPT-2 TLDR
+  93f0c400-501f-43ad-861b-4f67564f2e8f: !Template
+    answer_choices: null
+    id: 93f0c400-501f-43ad-861b-4f67564f2e8f
+    jinja: '{{document}}
+
+
+      ===
+
+
+      Given the above sentence, write its title: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: write_its_title
+    reference: ''
+  a0e699bf-1268-4929-ad13-438c08644118: !Template
+    answer_choices: null
+    id: a0e699bf-1268-4929-ad13-438c08644118
+    jinja: "Write a title for this sentence: {{document}} \n\nTitle: ||| {{summary}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: write_a_title_for_this_sentence
+    reference: ''
+  ac53a797-4d59-455a-b0e6-0e4d7d85f029: !Template
+    answer_choices: null
+    id: ac53a797-4d59-455a-b0e6-0e4d7d85f029
+    jinja: '{{document}} In a nutshell, ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: in_a_nutshell
+    reference: ''
+  d1d4a115-65fd-49eb-bd75-179a46b67ec0: !Template
+    answer_choices: null
+    id: d1d4a115-65fd-49eb-bd75-179a46b67ec0
+    jinja: 'Title: {{summary}}
+
+
+      ===
+
+
+      Write an article with the given title: ||| {{document}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: write_an_article
+    reference: ''
diff --git a/promptsource/templates/glue/ax/templates.yaml b/promptsource/templates/glue/ax/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..75025d97f14e9def6eb232e616dbf2364ceba184
--- /dev/null
+++ b/promptsource/templates/glue/ax/templates.yaml
@@ -0,0 +1,100 @@
+dataset: glue
+subset: ax
+templates:
+  074de970-f1fd-4793-923e-88299502e2f0: !Template
+    answer_choices: entailment ||| neutral ||| contradiction
+    id: 074de970-f1fd-4793-923e-88299502e2f0
+    jinja: 'The relationship between the following sentences can be characterized
+      as {{"entailment"}} (one sentence implies the other), {{"neutral"}} (the sentences
+      don''t necessarily imply or contradict one another), or {{"contradiction"}}
+      (the sentences contradict each other).
+
+      {{hypothesis}}
+
+      {{premise}}
+
+      What is the relationship between the sentences?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: relationship
+    reference: ''
+  32ae8811-2a1f-4027-96e8-725ecd08bba1: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 32ae8811-2a1f-4027-96e8-725ecd08bba1
+    jinja: '{{premise}}
+
+      Given the above, is it necessarily true that "{{hypothesis}}"? {{"A) yes B)
+      no or C) maybe."}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: mean
+    reference: ''
+  3f6b9de8-616b-4a43-a077-e205a4c33a28: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 3f6b9de8-616b-4a43-a077-e205a4c33a28
+    jinja: 'Consider the hypothesis that "{{hypothesis}}"
+
+      Does this follow from the knowledge that "{{premise}}"
+
+      {{"yes, no, or maybe?"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: consider
+    reference: ''
+  76803347-b0fd-4dd6-8a04-ab1a6ab314d5: !Template
+    answer_choices: imply ||| neither ||| contradict
+    id: 76803347-b0fd-4dd6-8a04-ab1a6ab314d5
+    jinja: '{{premise}}
+
+      Does the above sentence imply or contradict that "{{hypothesis}}"? Please answer
+      {{"imply, contradict, or neither"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: imply
+    reference: ''
+  8ff27ccf-21d3-45c2-afe4-4531309dfb9f: !Template
+    answer_choices: supports ||| neither ||| contradicts
+    id: 8ff27ccf-21d3-45c2-afe4-4531309dfb9f
+    jinja: 'Consider the {{"premise"}}:
+
+      {{premise}}
+
+      Does this {{"premise"}} support the following {{"hypothesis"}}?
+
+      {{hypothesis}}
+
+      Please answer {{"supports, contradicts, or neither"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: support
+    reference: ''
diff --git a/promptsource/templates/glue/cola/templates.yaml b/promptsource/templates/glue/cola/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e6e6a244eb9004d26aebcba7c72b285b9e4145cc
--- /dev/null
+++ b/promptsource/templates/glue/cola/templates.yaml
@@ -0,0 +1,95 @@
+dataset: glue
+subset: cola
+templates:
+  1d3f5f15-8128-4445-8de5-92365b7e54a8: !Template
+    answer_choices: no ||| yes
+    id: 1d3f5f15-8128-4445-8de5-92365b7e54a8
+    jinja: 'Does the following sentence make sense and use correct English? Please
+      answer {{"yes"}} or {{"no"}}.
+
+      {{sentence}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Make sense yes no
+    reference: ''
+  39a701ff-bb4b-48ac-8c0a-8c61bf0d4b8d: !Template
+    answer_choices: No ||| Yes
+    id: 39a701ff-bb4b-48ac-8c0a-8c61bf0d4b8d
+    jinja: '{{sentence}}
+
+      Is this example grammatically correct and sensible?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_this_correct
+    reference: A sample glue template
+  6f49b860-9145-4fcb-b632-9faea39e254e: !Template
+    answer_choices: no ||| yes
+    id: 6f49b860-9145-4fcb-b632-9faea39e254e
+    jinja: 'I''m copy-editing a story for publication. It has the following sentence
+      in it:
+
+      {{sentence}}
+
+      Does this sentence make sense and is it grammatically correct? Please answer
+      {{"yes or no"}}.
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: editing
+    reference: ''
+  79b4c04c-c0e2-4add-a600-d5572da192e7: !Template
+    answer_choices: unacceptable ||| acceptable
+    id: 79b4c04c-c0e2-4add-a600-d5572da192e7
+    jinja: 'The following sentence is either "{{"acceptable"}}", meaning it is grammatically
+      correct and makes sense, or "{{"unacceptable"}}". Which is it?
+
+      {{sentence}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Following sentence acceptable
+    reference: ''
+  dd33f089-57a1-452b-8bd5-8f1fffd10b60: !Template
+    answer_choices: no ||| yes
+    id: dd33f089-57a1-452b-8bd5-8f1fffd10b60
+    jinja: '{{sentence}}
+
+      I''m worried that sentence didn''t make any sense, or was grammatically incorrect.
+      Was it correct?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Previous sentence acceptable
+    reference: ''
diff --git a/promptsource/templates/glue/mnli/templates.yaml b/promptsource/templates/glue/mnli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6fb4c36d78af3c766c7e7afc804ec83cc01ab9ae
--- /dev/null
+++ b/promptsource/templates/glue/mnli/templates.yaml
@@ -0,0 +1,98 @@
+dataset: glue
+subset: mnli
+templates:
+  2884f60d-8069-4238-92fa-3314bbf76c3d: !Template
+    answer_choices: yes ||| unclear ||| no
+    id: 2884f60d-8069-4238-92fa-3314bbf76c3d
+    jinja: '{{premise}}
+
+      Is it therefore the case that "{{hypothesis}}"? Please answer {{"yes, no, or
+      unclear"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: is it the case
+    reference: ''
+  a558db31-99e3-45ee-9e24-a4d1b64ed1df: !Template
+    answer_choices: entailment ||| neutral ||| contradiction
+    id: a558db31-99e3-45ee-9e24-a4d1b64ed1df
+    jinja: 'In this task, you need to determine if two passages have one of three
+      relationships: {{"entailment, neural, or contradiction"}}. {{"Entailment"}}
+      means that the first passage implies the second. {{"Neutral"}} means that their
+      relationship is unclear. {{"Contradiction"}} means the first passage contradicts
+      the second. Here are the two passages:
+
+      {{premise}}
+
+      {{hypothesis}}
+
+      Is the relationship {{"entailment, neutral, or contradiction"}}?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: entailment
+    reference: ''
+  a730746a-58c3-4c5d-9e93-a73dbe3661e1: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: a730746a-58c3-4c5d-9e93-a73dbe3661e1
+    jinja: '{{premise}}
+
+      Does this mean that "{{hypothesis}}"? {{"A) yes B) no or C) maybe."}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: does this mean
+    reference: ''
+  ab25652e-20a7-4a1e-a530-916b2adc64a0: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: ab25652e-20a7-4a1e-a530-916b2adc64a0
+    jinja: 'Consider the premise:
+
+      {{premise}}
+
+      and the hypothesis:
+
+      {{hypothesis}}
+
+      Does the hypothesis follow from the premise? {{"A) yes B) no C) maybe"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: premise hypothesis
+    reference: ''
+  ac71489b-c5e9-4c60-8eae-35eb14cb5545: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: ac71489b-c5e9-4c60-8eae-35eb14cb5545
+    jinja: '{{premise}}
+
+      Does this imply that "{{hypothesis}}"? Please answer {{"yes, no, or maybe"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: imply
+    reference: ''
diff --git a/promptsource/templates/glue/mnli_matched/templates.yaml b/promptsource/templates/glue/mnli_matched/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e8d76f4dea22d462bde5a17bd27926bb38a9d9f1
--- /dev/null
+++ b/promptsource/templates/glue/mnli_matched/templates.yaml
@@ -0,0 +1,98 @@
+dataset: glue
+subset: mnli_matched
+templates:
+  53f051c9-a456-4af7-ac35-aee1c139406d: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 53f051c9-a456-4af7-ac35-aee1c139406d
+    jinja: '{{premise}}
+
+      Does this imply that "{{hypothesis}}"? Please answer {{"yes, no, or maybe"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: imply
+    reference: ''
+  736510c1-f3a2-4f77-b7c2-5b303240ae73: !Template
+    answer_choices: entailment ||| neutral ||| contradiction
+    id: 736510c1-f3a2-4f77-b7c2-5b303240ae73
+    jinja: 'In this task, you need to determine if two passages have one of three
+      relationships: {{"entailment, neural, or contradiction"}}. {{"Entailment"}}
+      means that the first passage implies the second. {{"Neutral"}} means that their
+      relationship is unclear. {{"Contradiction"}} means the first passage contradicts
+      the second. Here are the two passages:
+
+      {{premise}}
+
+      {{hypothesis}}
+
+      Is the relationship {{"entailment, neutral, or contradiction"}}?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: entailment
+    reference: ''
+  75badf58-7401-43e9-9950-adb3fd61864e: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 75badf58-7401-43e9-9950-adb3fd61864e
+    jinja: '{{premise}}
+
+      Does this mean that "{{hypothesis}}"? {{"A) yes B) no or C) maybe."}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: does this mean
+    reference: ''
+  8cd9b255-865b-4217-b1ff-4a027158fba8: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 8cd9b255-865b-4217-b1ff-4a027158fba8
+    jinja: 'Consider the premise:
+
+      {{premise}}
+
+      and the hypothesis:
+
+      {{hypothesis}}
+
+      Does the hypothesis follow from the premise? {{"A) yes B) no C) maybe"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: premise hypothesis
+    reference: ''
+  cef02970-9c3e-4089-befa-2d01de2eaa12: !Template
+    answer_choices: yes ||| unclear ||| no
+    id: cef02970-9c3e-4089-befa-2d01de2eaa12
+    jinja: '{{premise}}
+
+      Is it therefore the case that "{{hypothesis}}"? Please answer {{"yes, no, or
+      unclear"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: is it the case
+    reference: ''
diff --git a/promptsource/templates/glue/mnli_mismatched/templates.yaml b/promptsource/templates/glue/mnli_mismatched/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..41ed910797c7a65d22eb4ea856b5418baf8cab9a
--- /dev/null
+++ b/promptsource/templates/glue/mnli_mismatched/templates.yaml
@@ -0,0 +1,98 @@
+dataset: glue
+subset: mnli_mismatched
+templates:
+  6e6ffb0a-6981-4fd9-a188-15fe2c07d7f0: !Template
+    answer_choices: yes ||| unclear ||| no
+    id: 6e6ffb0a-6981-4fd9-a188-15fe2c07d7f0
+    jinja: '{{premise}}
+
+      Is it therefore the case that "{{hypothesis}}"? Please answer {{"yes, no, or
+      unclear"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: is it the case
+    reference: ''
+  704397f4-c16e-4737-ad6d-2f282517810f: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 704397f4-c16e-4737-ad6d-2f282517810f
+    jinja: '{{premise}}
+
+      Does this imply that "{{hypothesis}}"? Please answer {{"yes, no, or maybe"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: imply
+    reference: ''
+  709efd1d-3911-4db7-969b-7fc8600b796c: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 709efd1d-3911-4db7-969b-7fc8600b796c
+    jinja: 'Consider the premise:
+
+      {{premise}}
+
+      and the hypothesis:
+
+      {{hypothesis}}
+
+      Does the hypothesis follow from the premise? {{"A) yes B) no C) maybe"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: premise hypothesis
+    reference: ''
+  873607a6-cf11-4fb4-a038-0fe3a843315d: !Template
+    answer_choices: yes ||| maybe ||| no
+    id: 873607a6-cf11-4fb4-a038-0fe3a843315d
+    jinja: '{{premise}}
+
+      Does this mean that "{{hypothesis}}"? {{"A) yes B) no or C) maybe."}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: does this mean
+    reference: ''
+  b249978b-b1ee-440b-ae67-280e6631286b: !Template
+    answer_choices: entailment ||| neutral ||| contradiction
+    id: b249978b-b1ee-440b-ae67-280e6631286b
+    jinja: 'In this task, you need to determine if two passages have one of three
+      relationships: {{"entailment, neural, or contradiction"}}. {{"Entailment"}}
+      means that the first passage implies the second. {{"Neutral"}} means that their
+      relationship is unclear. {{"Contradiction"}} means the first passage contradicts
+      the second. Here are the two passages:
+
+      {{premise}}
+
+      {{hypothesis}}
+
+      Is the relationship {{"entailment, neutral, or contradiction"}}?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: entailment
+    reference: ''
diff --git a/promptsource/templates/glue/mrpc/templates.yaml b/promptsource/templates/glue/mrpc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af78b1eef1215d8b590720a23387610fac73ce16
--- /dev/null
+++ b/promptsource/templates/glue/mrpc/templates.yaml
@@ -0,0 +1,146 @@
+dataset: glue
+subset: mrpc
+templates:
+  3b88d2c4-0aeb-4c6d-9ccc-653a388250a5: !Template
+    answer_choices: null
+    id: 3b88d2c4-0aeb-4c6d-9ccc-653a388250a5
+    jinja: '{% if label == 1 %}
+
+      Paraphrase the following sentence: {{sentence1}}
+
+      |||
+
+      {{sentence2}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_paraphrase
+    reference: ''
+  44c2e6d9-facf-4959-8400-38e0eb8dd3a8: !Template
+    answer_choices: no ||| yes
+    id: 44c2e6d9-facf-4959-8400-38e0eb8dd3a8
+    jinja: 'I want to know whether the following two sentences mean the same thing.
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      Do they?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: want to know
+    reference: ''
+  adf659af-4e2d-4e7e-ab89-b33cfc0b5a50: !Template
+    answer_choices: no ||| yes
+    id: adf659af-4e2d-4e7e-ab89-b33cfc0b5a50
+    jinja: 'Does the sentence
+
+      {{sentence1}}
+
+      paraphrase (that is, mean the same thing as) this sentence?
+
+      {{sentence2}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: paraphrase
+    reference: ''
+  bbb395c2-2c70-4eaa-ad2f-2cf18a81da93: !Template
+    answer_choices: not equivalent ||| equivalent
+    id: bbb395c2-2c70-4eaa-ad2f-2cf18a81da93
+    jinja: 'Are the following two sentences "{{"equivalent"}}" or "{{"not equivalent"}}"?
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: equivalent
+    reference: ''
+  d830d7a5-abc0-4275-ac62-974e0088876f: !Template
+    answer_choices: null
+    id: d830d7a5-abc0-4275-ac62-974e0088876f
+    jinja: '{% if label == 1 %}
+
+      Generate a sentence that means the same thing as this one: {{sentence1}}
+
+      |||
+
+      {{sentence2}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_sentence
+    reference: ''
+  ee82d511-908c-4244-804f-6d0d907c68c7: !Template
+    answer_choices: no ||| yes
+    id: ee82d511-908c-4244-804f-6d0d907c68c7
+    jinja: 'Can I replace the sentence
+
+      {{sentence1}}
+
+      with the sentence
+
+      {{sentence2}}
+
+      and have it mean the same thing?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: replace
+    reference: ''
+  eefd8606-b936-4d26-b91d-8f4bc38bfcbf: !Template
+    answer_choices: no ||| yes
+    id: eefd8606-b936-4d26-b91d-8f4bc38bfcbf
+    jinja: 'Do the following two sentences mean the same thing?
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: same thing
+    reference: ''
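The generate_paraphrase and generate_sentence templates above wrap the whole
prompt in {% if label == 1 %} so that only genuine paraphrase pairs produce any
text; non-paraphrase examples render as an empty string, which downstream
consumers can filter out. A minimal sketch of the gating, assuming jinja2:

    import jinja2

    template = jinja2.Environment().from_string(
        "{% if label == 1 %}"
        "Paraphrase the following sentence: {{sentence1}} ||| {{sentence2}}"
        "{% endif %}"
    )
    print(template.render(sentence1="He left.", sentence2="He departed.", label=1))
    print(repr(template.render(sentence1="He left.", sentence2="Dogs bark.", label=0)))  # ''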
diff --git a/promptsource/templates/glue/qnli/templates.yaml b/promptsource/templates/glue/qnli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3e0210e8626618d561ea255b0dbaf36296683b32
--- /dev/null
+++ b/promptsource/templates/glue/qnli/templates.yaml
@@ -0,0 +1,91 @@
+dataset: glue
+subset: qnli
+templates:
+  50c3108c-b23c-4691-97be-72438606c840: !Template
+    answer_choices: yes ||| no
+    id: 50c3108c-b23c-4691-97be-72438606c840
+    jinja: '{{sentence}}
+
+      Does that sentence have all you need to answer the question "{{question}}"?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: all
+    reference: ''
+  5f0f24d9-14a7-4588-8dc2-494b4c693b81: !Template
+    answer_choices: yes ||| no
+    id: 5f0f24d9-14a7-4588-8dc2-494b4c693b81
+    jinja: 'Can you answer the question "{{question}}" based only on the following:
+
+      {{sentence}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: based
+    reference: ''
+  c626350d-6c0e-47be-b09e-c9ba1446b027: !Template
+    answer_choices: yes ||| no
+    id: c626350d-6c0e-47be-b09e-c9ba1446b027
+    jinja: 'Does knowing that "{{sentence}}" imply that I know the answer to "{{question}}"?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: imply
+    reference: ''
+  f2403d55-21a7-44bc-8b4c-6921fd7b01f5: !Template
+    answer_choices: yes ||| no
+    id: f2403d55-21a7-44bc-8b4c-6921fd7b01f5
+    jinja: 'I want to know the answer to the following question:
+
+      {{question}}
+
+      All the background I''m provided with is that "{{sentence}}". Is that enough
+      to answer the question?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: want to know
+    reference: ''
+  f44715c4-d787-484e-a912-5456cc2b6741: !Template
+    answer_choices: yes ||| no
+    id: f44715c4-d787-484e-a912-5456cc2b6741
+    jinja: 'Consider the passage:
+
+      {{sentence}}
+
+      and the question:
+
+      {{question}}
+
+      Is it possible to answer this question based only on the information in the
+      passage? {{"A) yes"}} or {{"B) no"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answerable
+    reference: ''
diff --git a/promptsource/templates/glue/qqp/templates.yaml b/promptsource/templates/glue/qqp/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fc1f4134961eeefc5d1fda50f08af54797bb346
--- /dev/null
+++ b/promptsource/templates/glue/qqp/templates.yaml
@@ -0,0 +1,88 @@
+dataset: glue
+subset: qqp
+templates:
+  8e711799-a57c-4941-833b-466bedfb80ad: !Template
+    answer_choices: no ||| yes
+    id: 8e711799-a57c-4941-833b-466bedfb80ad
+    jinja: I'm an administrator on the website Quora. There are two posts, one that
+      asks "{{question1}}" and another that asks "{{question2}}". I can merge questions
+      if they are asking the same thing. Can I merge these two questions? ||| {{ answer_choices[label]
+      }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: quora
+    reference: ''
+  94972071-a726-42a3-a726-13f414b65e67: !Template
+    answer_choices: not duplicates ||| duplicates
+    id: 94972071-a726-42a3-a726-13f414b65e67
+    jinja: '{{question1}}
+
+      {{question2}}
+
+      Pick one: These questions are "{{"duplicates"}}" or "{{"not duplicates"}}".
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: duplicate or not
+    reference: ''
+  a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b: !Template
+    answer_choices: no ||| yes
+    id: a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b
+    jinja: Are the questions "{{question1}}" and "{{question2}}" asking the same thing?
+      ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: same thing
+    reference: ''
+  c0182cd1-c7ac-4abe-829f-4651536af951: !Template
+    answer_choices: no ||| yes
+    id: c0182cd1-c7ac-4abe-829f-4651536af951
+    jinja: Can an answer to "{{question1}}" also be used to answer "{{question2}}"?
+      ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: answer
+    reference: ''
+  c0724198-97e7-44a1-89d8-c51e97ce0b04: !Template
+    answer_choices: No ||| Yes
+    id: c0724198-97e7-44a1-89d8-c51e97ce0b04
+    jinja: 'Question 1: {{question1}}
+
+      Question 2: {{question2}}
+
+
+      Do these two questions convey the same meaning? Yes or no? ||| {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: meaning
+    reference: ''
+  fd244bd3-ca3b-4e4f-9722-fd006c50e157: !Template
+    answer_choices: no ||| yes
+    id: fd244bd3-ca3b-4e4f-9722-fd006c50e157
+    jinja: I received the questions "{{question1}}" and "{{question2}}". Are they
+      duplicates? ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: duplicate
+    reference: ''
diff --git a/promptsource/templates/glue/rte/templates.yaml b/promptsource/templates/glue/rte/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7611a13313b777617f1e6cd8b14991f2eefab6fd
--- /dev/null
+++ b/promptsource/templates/glue/rte/templates.yaml
@@ -0,0 +1,91 @@
+dataset: glue
+subset: rte
+templates:
+  03a7ae07-5ddd-46c4-92f3-2152223d44ec: !Template
+    answer_choices: yes ||| no
+    id: 03a7ae07-5ddd-46c4-92f3-2152223d44ec
+    jinja: '{{sentence1}}
+
+      Does this mean that "{{sentence2}}" is true? {{"A) yes or B) no."}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: mean
+    reference: ''
+  4ee6ff27-de63-4e7b-a9d4-82a17eba407a: !Template
+    answer_choices: yes ||| no
+    id: 4ee6ff27-de63-4e7b-a9d4-82a17eba407a
+    jinja: 'Does the claim "{{sentence2}}" follow from the fact that "{{sentence1}}"?
+      Please answer either {{"yes"}} or {{"no"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "does the claim\u2026 follow the fact\u2026"
+    reference: ''
+  9e2b4267-ec23-44c8-b82a-107e2c890fec: !Template
+    answer_choices: entailment ||| not entailment
+    id: 9e2b4267-ec23-44c8-b82a-107e2c890fec
+    jinja: 'We say that one sentence "{{"entails"}}" another sentence when the first
+      sentence implies the second sentence is true. Consider the following two sentences:
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      Is the relationship from the first to the second sentence "{{"entailment"}}"
+      or "{{"not entailment"}}"?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment explained
+    reference: ''
+  c8dfc879-40f2-412d-be1e-4cd70107f6e6: !Template
+    answer_choices: yes ||| no
+    id: c8dfc879-40f2-412d-be1e-4cd70107f6e6
+    jinja: 'Does "{{sentence1}}" imply that "{{sentence2}}"? Please answer either
+      {{"yes"}} or {{"no"}}.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: imply
+    reference: ''
+  f56ffced-9b16-431a-8a17-501e63cddf73: !Template
+    answer_choices: yes ||| no
+    id: f56ffced-9b16-431a-8a17-501e63cddf73
+    jinja: '{{sentence1}}
+
+      Does this imply
+
+      {{sentence2}}
+
+      Please answer {{"A) yes or B) no."}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: imply separated
+    reference: ''
diff --git a/promptsource/templates/glue/sst2/templates.yaml b/promptsource/templates/glue/sst2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2dc6537d692e5a3a47c5f0c77f76cf8ec3758c48
--- /dev/null
+++ b/promptsource/templates/glue/sst2/templates.yaml
@@ -0,0 +1,76 @@
+dataset: glue
+subset: sst2
+templates:
+  11d1c505-9232-4c35-82a4-4c3642843e2e: !Template
+    answer_choices: negative ||| positive
+    id: 11d1c505-9232-4c35-82a4-4c3642843e2e
+    jinja: '{{sentence}}
+
+      Question: Was that sentence {{"positive"}} or {{"negative"}}? Answer: ||| {{
+      answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: positive negative after
+    reference: ''
+  228fcae7-7f4c-4e3c-9ac4-e49b26bc103d: !Template
+    answer_choices: negative ||| positive
+    id: 228fcae7-7f4c-4e3c-9ac4-e49b26bc103d
+    jinja: 'I''m reading a review that says "{{sentence}}".
+
+
+      Do you think the review is {{"positive"}} or {{"negative"}}? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: review
+    reference: ''
+  5aa0cea9-0f8d-454d-b25b-b0d4cda273b8: !Template
+    answer_choices: sad ||| happy
+    id: 5aa0cea9-0f8d-454d-b25b-b0d4cda273b8
+    jinja: 'Someone just said to me "{{sentence}}".
+
+
+      Do you think they are {{"sad"}} or {{"happy"}}? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: said
+    reference: ''
+  63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a: !Template
+    answer_choices: negative ||| positive
+    id: 63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a
+    jinja: 'Does the following sentence have a {{"positive"}} or {{"negative"}} sentiment?
+
+      {{sentence}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: following positive negative
+    reference: ''
+  6dd74cd5-e074-4612-9e96-c17ca88c3bc4: !Template
+    answer_choices: bad ||| good
+    id: 6dd74cd5-e074-4612-9e96-c17ca88c3bc4
+    jinja: Someone sent me an email with the sentence "{{sentence}}". Do you think
+      they are feeling {{"good"}} or {{"bad"}}? ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: good or bad
+    reference: ''
diff --git a/promptsource/templates/glue/stsb/templates.yaml b/promptsource/templates/glue/stsb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc74387aed7cd6b8c82f1d519d322b972db5f61c
--- /dev/null
+++ b/promptsource/templates/glue/stsb/templates.yaml
@@ -0,0 +1,113 @@
+dataset: glue
+subset: stsb
+templates:
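+# Note on the output side of every template below (derivable from the templates
+# themselves, with label being the STS-B gold similarity score, a float in
+# [0.0, 5.0]): the expression {{ (((5*label) | round )/5) }} rounds the score
+# to the nearest multiple of 0.2, e.g. label = 2.64 gives 13.2 -> 13 -> 2.6.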
+  50e3a541-108c-4b26-a423-956562d9b3af: !Template
+    answer_choices: null
+    id: 50e3a541-108c-4b26-a423-956562d9b3af
+    jinja: Rate on a scale from {{"0.0"}} to {{"5.0"}} how similar the sentences "{{sentence1}}"
+      and "{{sentence2}}" are. ||| {{ (((5*label) | round )/5) }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Pearson Correlation
+      - Spearman Correlation
+      original_task: true
+    name: rank
+    reference: ''
+  88dcb716-d19c-45bc-9d3a-cdf8fff5500b: !Template
+    answer_choices: null
+    id: 88dcb716-d19c-45bc-9d3a-cdf8fff5500b
+    jinja: 'Please rate how similar these two sentences are from {{"0.0"}} to {{"5.0"}}.
+
+      Sentence A: {{sentence1}}
+
+      Sentence B: {{sentence2}}
+
+      |||
+
+      {{ (((5*label) | round )/5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Pearson Correlation
+      - Spearman Correlation
+      original_task: true
+    name: rate
+    reference: ''
+  a552635f-3a9a-497f-ac04-ef414b24eb16: !Template
+    answer_choices: null
+    id: a552635f-3a9a-497f-ac04-ef414b24eb16
+    jinja: 'Please give me a score denoting the similarity of the following two sentences:
+
+      Sentence 1: {{sentence1}}
+
+      Sentence 2: {{sentence2}}
+
+      Your score should be something like {{"3.4"}}, where {{"0.0 means very dissimilar,
+      2.5 means kind of similar, and 5.0 means very similar"}}.
+
+      |||
+
+      {{ (((5*label) | round )/5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Pearson Correlation
+      - Spearman Correlation
+      original_task: true
+    name: examples
+    reference: ''
+  ca75788d-4974-440a-a7b7-c42bae814d59: !Template
+    answer_choices: null
+    id: ca75788d-4974-440a-a7b7-c42bae814d59
+    jinja: 'I need to know how similar these two passages are:
+
+      - {{sentence1}}
+
+      - {{sentence2}}
+
+
+      Question: Can you give me a number from {{"0.0 to 5.0"}} that denotes how similar
+      they are, where {{"0.0"}} means totally dissimilar and {{"5.0"}} means extremely
+      similar?
+
+      Answer:
+
+      |||
+
+      {{ (((5*label) | round )/5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Pearson Correlation
+      - Spearman Correlation
+      original_task: true
+    name: similarity
+    reference: ''
+  d7315518-cfb9-4840-93ab-c52f1bb5e74d: !Template
+    answer_choices: null
+    id: d7315518-cfb9-4840-93ab-c52f1bb5e74d
+    jinja: 'I need to assign a score from {{"0.0 to 5.0"}} that denotes how similar
+      the following two sentences are:
+
+      A: {{sentence1}}
+
+      B: {{sentence2}}
+
+      What score should I assign?
+
+      |||
+
+      {{ (((5*label) | round )/5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Pearson Correlation
+      - Spearman Correlation
+      original_task: true
+    name: score
+    reference: ''
diff --git a/promptsource/templates/glue/wnli/templates.yaml b/promptsource/templates/glue/wnli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f4e63c4240b302bb061b82ce8c46e57cbdee5cd7
--- /dev/null
+++ b/promptsource/templates/glue/wnli/templates.yaml
@@ -0,0 +1,95 @@
+dataset: glue
+subset: wnli
+templates:
+  10c354ee-6f4e-4b04-91e1-29e999a8f3e7: !Template
+    answer_choices: not confident ||| very confident
+    id: 10c354ee-6f4e-4b04-91e1-29e999a8f3e7
+    jinja: 'If it''s true that
+
+      {{sentence1}}
+
+      how {{"confident"}} should I be that
+
+      {{sentence2}}
+
+      {{"very confident or not confident?"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: confident
+    reference: ''
+  3a0e46cb-0b96-4972-83f6-29a6c6a09ba9: !Template
+    answer_choices: no ||| yes
+    id: 3a0e46cb-0b96-4972-83f6-29a6c6a09ba9
+    jinja: '{{"Entailment"}} means that the second sentence follows from the first
+      sentence. Are the following two sentences an example of entailment?
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment explained
+    reference: ''
+  75f89b05-5a81-401b-8a04-8239211a9a95: !Template
+    answer_choices: no ||| yes
+    id: 75f89b05-5a81-401b-8a04-8239211a9a95
+    jinja: 'Assume that the following is true:
+
+      {{sentence1}}
+
+      Does this mean that "{{sentence2}}"?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: mean
+    reference: ''
+  a244158a-a248-4e34-bef7-66e269dd0815: !Template
+    answer_choices: no ||| yes
+    id: a244158a-a248-4e34-bef7-66e269dd0815
+    jinja: 'Someone told me "{{sentence1}}" Now, I think that "{{sentence2}}" Am I
+      justified in thinking this?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: justified
+    reference: ''
+  a2ce492b-dfd0-4f04-bc44-70c7867ba231: !Template
+    answer_choices: no ||| yes
+    id: a2ce492b-dfd0-4f04-bc44-70c7867ba231
+    jinja: '{{sentence1}}
+
+      {{sentence2}}
+
+      Does the first sentence imply the second sentence?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: imply
+    reference: ''
diff --git a/promptsource/templates/google_wellformed_query/templates.yaml b/promptsource/templates/google_wellformed_query/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..def1fc4bd6fa6cde16c21f9cf10f6665ed541192
--- /dev/null
+++ b/promptsource/templates/google_wellformed_query/templates.yaml
@@ -0,0 +1,72 @@
+dataset: google_wellformed_query
+templates:
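+# The templates below binarize the target: assuming rating is the mean
+# annotator well-formedness score in [0.0, 1.0], {% if 0.5 < rating %}
+# maps scores above 0.5 to "yes" and everything else to "no".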
+  7462caa6-9fb3-43ed-a883-85f8940ba23d: !Template
+    answer_choices: null
+    id: 7462caa6-9fb3-43ed-a883-85f8940ba23d
+    jinja: When I submitted "{{content}}" to a search engine, I obtained really bad
+      results. Is that a surprise?|||{% if 0.5 < rating %}yes{% else %}no{% endif
+      %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wellformed_5
+    reference: ''
+  80e4797c-2454-4f27-8032-a8191cd3602d: !Template
+    answer_choices: null
+    id: 80e4797c-2454-4f27-8032-a8191cd3602d
+    jinja: '"{{content}}" would work well as a search query, right?|||{% if 0.5 <
+      rating %}yes{% else %}no{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wellformed_4
+    reference: ''
+  9816d5bf-c4db-42ed-8ac8-2be45fa8a0bb: !Template
+    answer_choices: null
+    id: 9816d5bf-c4db-42ed-8ac8-2be45fa8a0bb
+    jinja: Would "{{content}}" be a good query for a search engine?|||{% if 0.5 <
+      rating %}yes{% else %}no{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wellformed_1
+    reference: ''
+  9b138603-611a-432d-aa6e-f51a473cf85d: !Template
+    answer_choices: null
+    id: 9b138603-611a-432d-aa6e-f51a473cf85d
+    jinja: 'Do you think a search engine would return valid results for this query:
+      "{{content}}"?|||{% if 0.5 < rating %}yes{% else %}no{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wellformed_3
+    reference: ''
+  9f3cc358-3746-405e-b5e9-5fc0dedc0b5d: !Template
+    answer_choices: null
+    id: 9f3cc358-3746-405e-b5e9-5fc0dedc0b5d
+    jinja: Given this query "{{content}}", would a search engine know what to look
+      for?|||{% if 0.5 < rating %}yes{% else %}no{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wellformed_2
+    reference: ''
+  e1c64d17-c623-4a30-b899-5c6a4e44e3d7: !Template
+    answer_choices: null
+    id: e1c64d17-c623-4a30-b899-5c6a4e44e3d7
+    jinja: '"{{content}}" is a well formed query, yes or no?|||{% if 0.5 < rating
+      %}yes{% else %}no{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wellformed_0
+    reference: ''
diff --git a/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml b/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5752bc97fde35419dd9a48004e28263e834fc4ea
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_genre_1/templates.yaml
@@ -0,0 +1,87 @@
+dataset: guardian_authorship
+subset: cross_genre_1
+templates:
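+# author and topic are integer class labels; the templates decode them with
+# inline Jinja lists (13 author names, 5 topic names). The same pattern
+# repeats in the other guardian_authorship subsets below.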
+  026e1ef2-c765-4262-b7b3-a087f38907db: !Template
+    answer_choices: null
+    id: 026e1ef2-c765-4262-b7b3-a087f38907db
+    jinja: "Who could have authored this article based on the writing style?\n\n{{article}}\
+      \ |||\n{{\n[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\"\
+      ,\n  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: writing_style
+    reference: ''
+  12982397-c0c3-49a9-b3ac-38735908428b: !Template
+    answer_choices: null
+    id: 12982397-c0c3-49a9-b3ac-38735908428b
+    jinja: "Generate an article based on the writing style of  {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}}  on the topic of {{\n[\n  \"Politics\",\n  \"Society\",\n \
+      \ \"UK\",\n  \"World\",\n  \"Books\"\n] [topic]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author_topic
+    reference: ''
+  1b9fb5f9-6d2a-45ad-8ad4-dc199ee181b6: !Template
+    answer_choices: null
+    id: 1b9fb5f9-6d2a-45ad-8ad4-dc199ee181b6
+    jinja: "Generate an article on the topic of {{[\n  \"Politics\",\n  \"Society\"\
+      ,\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic] }} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_topic
+    reference: ''
+  68bfa6a4-a89c-4be2-aa0b-cce1103e3ecf: !Template
+    answer_choices: null
+    id: 68bfa6a4-a89c-4be2-aa0b-cce1103e3ecf
+    jinja: "What is the topic of this article?\n\n{{article}} |||\n{{\n[\n  \"Politics\"\
+      ,\n  \"Society\",\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: topic
+    reference: ''
+  dc46136b-69d1-484e-9b5f-accfb4ba22df: !Template
+    answer_choices: null
+    id: dc46136b-69d1-484e-9b5f-accfb4ba22df
+    jinja: "Generate an article based on the writing style of {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author
+    reference: ''
+  e885498a-04f9-4db9-bc01-d1324803315a: !Template
+    answer_choices: null
+    id: e885498a-04f9-4db9-bc01-d1324803315a
+    jinja: "Who wrote this article and what is the article's topic?\n\n{{article}}\
+      \ |||\n{{[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\",\n\
+      \  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]}} wrote this article on the\
+      \ topic of {{[\n  \"Politics\",\n  \"Society\",\n  \"UK\",\n  \"World\",\n \
+      \ \"Books\"\n][topic]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: who_what_article
+    reference: ''
diff --git a/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d046d4ee501f4f341ec7cdf3484a1cfe3e92eff
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_topic_1/templates.yaml
@@ -0,0 +1,84 @@
+dataset: guardian_authorship
+subset: cross_topic_1
+templates:
+  18cea428-59ae-4db1-b2ee-6c44fb39dc71: !Template
+    answer_choices: null
+    id: 18cea428-59ae-4db1-b2ee-6c44fb39dc71
+    jinja: "Who wrote this article and what is the article's topic?\n\n{{article}}\
+      \ |||\n{{[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\",\n\
+      \  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]}} wrote this article on the\
+      \ topic of {{[\n  \"Politics\",\n  \"Society\",\n  \"UK\",\n  \"World\",\n \
+      \ \"Books\"\n][topic]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: who_what_article
+    reference: ''
+  3b4cc95c-f88c-4b51-add5-32ffdebfdfc6: !Template
+    answer_choices: null
+    id: 3b4cc95c-f88c-4b51-add5-32ffdebfdfc6
+    jinja: "Generate an article based on the writing style of  {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}}  on the topic of {{\n[\n  \"Politics\",\n  \"Society\",\n \
+      \ \"UK\",\n  \"World\",\n  \"Books\"\n] [topic]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author_topic
+    reference: ''
+  a19222b2-6edd-479b-a30c-96d2497216e5: !Template
+    answer_choices: null
+    id: a19222b2-6edd-479b-a30c-96d2497216e5
+    jinja: "What is the topic of this article?\n\n{{article}} |||\n{{\n[\n  \"Politics\"\
+      ,\n  \"Society\",\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: topic
+    reference: ''
+  d617f1c6-114f-4fd7-81d4-7b7e12f353b0: !Template
+    answer_choices: null
+    id: d617f1c6-114f-4fd7-81d4-7b7e12f353b0
+    jinja: "Who could have authored this article based on the writing style?\n\n{{article}}\
+      \ |||\n{{\n[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\"\
+      ,\n  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: writing_style
+    reference: ''
+  f22055a0-478e-4ace-9d0b-82986ad77919: !Template
+    answer_choices: null
+    id: f22055a0-478e-4ace-9d0b-82986ad77919
+    jinja: "Generate an article based on the writing style of {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author
+    reference: ''
+  f289839f-7fdb-49d7-ab66-dacd6e583e04: !Template
+    answer_choices: null
+    id: f289839f-7fdb-49d7-ab66-dacd6e583e04
+    jinja: "Generate an article on the topic of {{[\n  \"Politics\",\n  \"Society\"\
+      ,\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic] }} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_topic
+    reference: ''
diff --git a/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce48d3109b24797449b890a4714d52037db937b3
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_topic_4/templates.yaml
@@ -0,0 +1,84 @@
+dataset: guardian_authorship
+subset: cross_topic_4
+templates:
+  3951d79c-408b-4895-8226-3033d8784d2c: !Template
+    answer_choices: null
+    id: 3951d79c-408b-4895-8226-3033d8784d2c
+    jinja: "Who wrote this article and what is the article's topic?\n\n{{article}}\
+      \ |||\n{{[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\",\n\
+      \  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]}} wrote this article on the\
+      \ topic of {{[\n  \"Politics\",\n  \"Society\",\n  \"UK\",\n  \"World\",\n \
+      \ \"Books\"\n][topic]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: who_what_article
+    reference: ''
+  4998d29d-7042-439b-8346-c4f93bd11cbc: !Template
+    answer_choices: null
+    id: 4998d29d-7042-439b-8346-c4f93bd11cbc
+    jinja: "Who could have authored this article based on the writing style?\n\n{{article}}\
+      \ |||\n{{\n[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\"\
+      ,\n  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: writing_style
+    reference: ''
+  4ef96141-81c1-488c-92b9-5d35a3a12afa: !Template
+    answer_choices: null
+    id: 4ef96141-81c1-488c-92b9-5d35a3a12afa
+    jinja: "Generate an article based on the writing style of {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author
+    reference: ''
+  54e1f0ac-1e17-43bb-85ee-3f852fcccb10: !Template
+    answer_choices: null
+    id: 54e1f0ac-1e17-43bb-85ee-3f852fcccb10
+    jinja: "What is the topic of this article?\n\n{{article}} |||\n{{\n[\n  \"Politics\"\
+      ,\n  \"Society\",\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: topic
+    reference: ''
+  93d06e87-f328-415d-8fda-f4732165736d: !Template
+    answer_choices: null
+    id: 93d06e87-f328-415d-8fda-f4732165736d
+    jinja: "Generate an article based on the writing style of  {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}}  on the topic of {{\n[\n  \"Politics\",\n  \"Society\",\n \
+      \ \"UK\",\n  \"World\",\n  \"Books\"\n] [topic]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author_topic
+    reference: ''
+  b89bb96c-e3c7-4e8a-beab-658800526864: !Template
+    answer_choices: null
+    id: b89bb96c-e3c7-4e8a-beab-658800526864
+    jinja: "Generate an article on the topic of {{[\n  \"Politics\",\n  \"Society\"\
+      ,\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic] }} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_topic
+    reference: ''
diff --git a/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml b/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f52735d4741602ed4944260f6378d7efc783d92d
--- /dev/null
+++ b/promptsource/templates/guardian_authorship/cross_topic_7/templates.yaml
@@ -0,0 +1,84 @@
+dataset: guardian_authorship
+subset: cross_topic_7
+templates:
+  6752a104-6037-4c8d-9cc3-7b88b97e5142: !Template
+    answer_choices: null
+    id: 6752a104-6037-4c8d-9cc3-7b88b97e5142
+    jinja: "Who wrote this article and what is the article's topic?\n\n{{article}}\
+      \ |||\n{{[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\",\n\
+      \  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]}} wrote this article on the\
+      \ topic of {{[\n  \"Politics\",\n  \"Society\",\n  \"UK\",\n  \"World\",\n \
+      \ \"Books\"\n][topic]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: who_what_article
+    reference: ''
+  794dff9a-dd24-4e67-9cb9-c67773b3d09d: !Template
+    answer_choices: null
+    id: 794dff9a-dd24-4e67-9cb9-c67773b3d09d
+    jinja: "Generate an article based on the writing style of {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author
+    reference: ''
+  96a5ad6e-0d4e-4fc8-9429-79ef7e444e96: !Template
+    answer_choices: null
+    id: 96a5ad6e-0d4e-4fc8-9429-79ef7e444e96
+    jinja: "What is the topic of this article?\n\n{{article}} |||\n{{\n[\n  \"Politics\"\
+      ,\n  \"Society\",\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: topic
+    reference: ''
+  9ffa7dc2-c8e5-4794-8d2c-671b68b007fc: !Template
+    answer_choices: null
+    id: 9ffa7dc2-c8e5-4794-8d2c-671b68b007fc
+    jinja: "Generate an article based on the writing style of  {{\n[\n  \"catherinebennett\"\
+      ,\n  \"georgemonbiot\",\n  \"hugoyoung\",\n  \"jonathanfreedland\",\n  \"martinkettle\"\
+      ,\n  \"maryriddell\",\n  \"nickcohen\",\n  \"peterpreston\",\n  \"pollytoynbee\"\
+      ,\n  \"royhattersley\",\n  \"simonhoggart\",\n  \"willhutton\",\n  \"zoewilliams\"\
+      \n] [author]\n}}  on the topic of {{\n[\n  \"Politics\",\n  \"Society\",\n \
+      \ \"UK\",\n  \"World\",\n  \"Books\"\n] [topic]\n}} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_author_topic
+    reference: ''
+  d89e8c33-35c4-4c0c-a6c9-52460ed20f7f: !Template
+    answer_choices: null
+    id: d89e8c33-35c4-4c0c-a6c9-52460ed20f7f
+    jinja: "Who could have authored this article based on the writing style?\n\n{{article}}\
+      \ |||\n{{\n[\n  \"catherinebennett\",\n  \"georgemonbiot\",\n  \"hugoyoung\"\
+      ,\n  \"jonathanfreedland\",\n  \"martinkettle\",\n  \"maryriddell\",\n  \"nickcohen\"\
+      ,\n  \"peterpreston\",\n  \"pollytoynbee\",\n  \"royhattersley\",\n  \"simonhoggart\"\
+      ,\n  \"willhutton\",\n  \"zoewilliams\"\n][author]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: writing_style
+    reference: ''
+  e6b9c224-1632-40da-8c69-76986da7015d: !Template
+    answer_choices: null
+    id: e6b9c224-1632-40da-8c69-76986da7015d
+    jinja: "Generate an article on the topic of {{[\n  \"Politics\",\n  \"Society\"\
+      ,\n  \"UK\",\n  \"World\",\n  \"Books\"\n][topic] }} |||\n{{article}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: article_from_topic
+    reference: ''
diff --git a/promptsource/templates/gutenberg_time/templates.yaml b/promptsource/templates/gutenberg_time/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1297891f990b536dae36ef2a9e2ca582afd30ffc
--- /dev/null
+++ b/promptsource/templates/gutenberg_time/templates.yaml
@@ -0,0 +1,95 @@
+dataset: gutenberg_time
+templates:
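+# Field guide (assumed from the dataset schema): tok_context is the tokenized
+# passage, time_phrase the time expression it contains, and hour_reference
+# the hour (0-23) that the phrase refers to.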
+  06dce7dd-ae32-4acb-a1c8-6a01303b577b: !Template
+    answer_choices: null
+    id: 06dce7dd-ae32-4acb-a1c8-6a01303b577b
+    jinja: "Given the following text. What time reference is reported in the text?\n\
+      \n{{tok_context}}\n\nThe time reference reported is \n|||\n{{time_phrase}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: asking_the_time_reference_before
+    reference: Asking the time reference before the text
+  1e880fc4-6df7-4cab-8658-82cae44135cf: !Template
+    answer_choices: null
+    id: 1e880fc4-6df7-4cab-8658-82cae44135cf
+    jinja: "{{tok_context}}\n\nGiven the previous text. What time reference is reported\
+      \ in the text?\n\nThe time reference reported is \n|||\n{{time_phrase}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: asking_the_time_reference_after
+    reference: ''
+  27e6ef41-5f29-485a-9fa8-7b71feb956c8: !Template
+    answer_choices: null
+    id: 27e6ef41-5f29-485a-9fa8-7b71feb956c8
+    jinja: '{{tok_context}}
+
+
+      Given the previous text, what time reference is reported in the text? What time
+      does it indicate?
+
+
+      |||
+
+
+      The time reference reported is "{{time_phrase}}".
+
+      It indicates {{hour_reference}}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: asking_the_time_reference_and_actual_time_after
+    reference: Asking time reference and actual time after the text
+  4efa58a3-a38b-4bcd-8597-687a7b7f56f8: !Template
+    answer_choices: null
+    id: 4efa58a3-a38b-4bcd-8597-687a7b7f56f8
+    jinja: "Given the following text. What hour (between 0 and 23) does the phrase\
+      \ \"{{time_phrase}}\" indicate?\n\n{{tok_context}}\n\nIt indicates \n|||\n{{hour_reference}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: asking_the_time_phrase_explicit_before
+    reference: Ask for the time reported in the text, explicitly mentioning the phrase,
+      asking the question before the text
+  75cbe764-02f9-4183-9be4-b7bba3d3b1f6: !Template
+    answer_choices: null
+    id: 75cbe764-02f9-4183-9be4-b7bba3d3b1f6
+    jinja: "{{tok_context}}\n\nGiven the previous text. What hour (between 0 and 23)\
+      \ does the phrase \"{{time_phrase}}\" indicate?\n\nIt indicates \n|||\n{{hour_reference}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: asking_the_time_phrase_explicit_after
+    reference: Ask for the time reported in the text, explicitly mentioning the phrase,
+      asking the question after the text
+  9004b87b-2731-4951-976a-9269e28be7be: !Template
+    answer_choices: null
+    id: 9004b87b-2731-4951-976a-9269e28be7be
+    jinja: 'Given the following text, what time reference is reported in the text?
+      What time does it indicate?
+
+
+      {{tok_context}}
+
+
+      |||
+
+
+      The time reference reported is "{{time_phrase}}".
+
+      It indicates {{hour_reference}}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: asking_the_time_reference_and_actual_time_before
+    reference: 'Asking time reference and actual time before the text'
diff --git a/promptsource/templates/hans/templates.yaml b/promptsource/templates/hans/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d980bb9ee715bd6a3e0248e516a86571f9b1ee50
--- /dev/null
+++ b/promptsource/templates/hans/templates.yaml
@@ -0,0 +1,127 @@
+dataset: hans
+templates:
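+# HANS labels are 0 = entailment and 1 = non-entailment, so the binary
+# answer_choices below ("Yes ||| No", "True ||| False") are indexed
+# directly by label.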
+  03fc899d-aa53-4bbd-8808-d390b2a30f86: !Template
+    answer_choices: Yes ||| No
+    id: 03fc899d-aa53-4bbd-8808-d390b2a30f86
+    jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes\
+      \ or no? ||| {{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does this imply
+    reference: v0.1
+  2084c370-6052-4840-89b6-b35ad70fdf31: !Template
+    answer_choices: Yes ||| No
+    id: 2084c370-6052-4840-89b6-b35ad70fdf31
+    jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes
+      or no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: should assume
+    reference: Webson & Pavlick 2021
+  559dec8c-5ecc-4ff6-9765-7358e5b675d3: !Template
+    answer_choices: Yes ||| No
+    id: 559dec8c-5ecc-4ff6-9765-7358e5b675d3
+    jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+      Yes or no? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  591a436a-588d-4356-9c3c-7f2ddbb3ba55: !Template
+    answer_choices: Yes ||| No
+    id: 591a436a-588d-4356-9c3c-7f2ddbb3ba55
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes or no? |||
+      {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does it follow that
+    reference: v0.1
+  6ed3823e-5ebb-4398-8366-273047d970f0: !Template
+    answer_choices: Yes ||| No
+    id: 6ed3823e-5ebb-4398-8366-273047d970f0
+    jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes or
+      no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed true
+    reference: Webson & Pavlick 2021
+  b12b3a20-3cc2-42a8-899e-4ef71a72e484: !Template
+    answer_choices: Yes ||| No
+    id: b12b3a20-3cc2-42a8-899e-4ef71a72e484
+    jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+      Yes or no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: must be true
+    reference: v0.1
+  c5508a95-1f23-47b9-aed4-0eca8380f71b: !Template
+    answer_choices: Yes ||| No
+    id: c5508a95-1f23-47b9-aed4-0eca8380f71b
+    jinja: '{{premise}} Using only the above description and what you know about the
+      world, is "{{hypothesis}}" definitely correct? Yes or no? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: MNLI crowdsource
+    reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+  d6fad9e1-d882-4d06-8f7f-ce400268df5f: !Template
+    answer_choices: Yes ||| No
+    id: d6fad9e1-d882-4d06-8f7f-ce400268df5f
+    jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes or no?
+      ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: justified in saying
+    reference: Webson & Pavlick 2021
+  e86994a7-2649-4535-acce-57e5aed8d390: !Template
+    answer_choices: True ||| False
+    id: e86994a7-2649-4535-acce-57e5aed8d390
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True or False? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: Same as reported in Figure G31 of the GPT-3 paper.
+  ffbc8068-e791-4277-b342-1d7e0e80f825: !Template
+    answer_choices: Yes ||| No
+    id: ffbc8068-e791-4277-b342-1d7e0e80f825
+    jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes or no? |||
+      {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: can we infer
+    reference: Webson & Pavlick 2021
diff --git a/promptsource/templates/head_qa/en/templates.yaml b/promptsource/templates/head_qa/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..273702ff98fcddf6be92c2bfcbf4cb4dd3f38237
--- /dev/null
+++ b/promptsource/templates/head_qa/en/templates.yaml
@@ -0,0 +1,176 @@
+dataset: head_qa
+subset: en
+templates:
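+# In this schema, ra holds the id of the correct answer, so the loop
+# {% for answer in answers if answer["aid"]==ra %} keeps exactly the one
+# answer dict whose "aid" matches the gold answer.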
+  375b86a3-a869-4473-920c-c00ea789e943: !Template
+    answer_choices: null
+    id: 375b86a3-a869-4473-920c-c00ea789e943
+    jinja: 'Answer/complete the following paragraph about {{category}}:
+
+
+      {{qtext}}
+
+
+      Which one is the correct answer?
+
+
+      {% for answer in answers %}
+
+      {{answer["aid"]}}. {{answer["atext"]}}
+
+      {% endfor %}
+
+
+      |||
+
+
+      Answer number {{ra}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multiple_choice_q_and_a_index_with_context_en
+    reference: Pose a multi-choice question using the index as an answer and the category
+      as context
+  749a5c3f-c10e-4a4a-aa35-d31698bb1104: !Template
+    answer_choices: null
+    id: 749a5c3f-c10e-4a4a-aa35-d31698bb1104
+    jinja: 'Answer/complete the following paragraph:
+
+
+      {{qtext}}
+
+
+      What is the correct answer?
+
+      - {{ answers | map(attribute="atext")| join("\n- ") }}
+
+
+      |||
+
+
+      {% for answer in answers if answer["aid"]==ra -%}
+
+      {{answer["atext"]}}
+
+      {%- endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multiple_choice_q_and_a_en
+    reference: Pose a multi-choice question
+  c830f4cc-128c-4644-9e19-4c99782f70bb: !Template
+    answer_choices: null
+    id: c830f4cc-128c-4644-9e19-4c99782f70bb
+    jinja: 'Answer/complete the following paragraph:
+
+
+      {{qtext}}
+
+
+      Which one is the correct answer?
+
+
+      {% for answer in answers %}
+
+      {{answer["aid"]}}. {{answer["atext"]}}
+
+      {% endfor %}
+
+
+      |||
+
+
+      Answer number {{ra}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multiple_choice_q_and_a_index_en
+    reference: Pose a multi-choice question using the index as the answer
+  df12d7e1-2168-46e0-9400-c3a7ca27b42c: !Template
+    answer_choices: null
+    id: df12d7e1-2168-46e0-9400-c3a7ca27b42c
+    jinja: 'Answer/complete the following paragraph about {{category}}:
+
+
+      {{qtext}}
+
+
+      What is the correct answer?
+
+      - {{ answers | map(attribute="atext")| join("\n- ") }}
+
+
+      |||
+
+
+      {% for answer in answers if answer["aid"]==ra -%}
+
+      {{answer["atext"]}}
+
+      {%- endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multiple_choice_q_and_a_with_context_en
+    reference: Pose a multi-choice question using category information as context
+  e0cb8056-22b4-4878-8164-a79cfc5d3a62: !Template
+    answer_choices: null
+    id: e0cb8056-22b4-4878-8164-a79cfc5d3a62
+    jinja: 'Given this list of statements about {{category}}: {{ answers | map(attribute="atext")
+      | map("lower") | map("trim", ".") | join(", ") }}.
+
+
+      Which one is the most appropriate answer/completion for the paragraph that follows?
+
+
+      {{qtext}}
+
+
+      |||
+
+
+      {% for answer in answers if answer["aid"]==ra -%}
+
+      {{answer["atext"]}}
+
+      {%- endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multiple_choice_a_and_q_with_context_en
+    reference: Pose a multi-choice question presenting the answers first using category
+      as context
+  e4f4e194-a78b-433b-ac48-dabf6244be35: !Template
+    answer_choices: null
+    id: e4f4e194-a78b-433b-ac48-dabf6244be35
+    jinja: 'Given this list of statements: {{ answers | map(attribute="atext") | map("lower")
+      | map("trim", ".") | join(", ") }}.
+
+
+      Which one is the most appropriate answer/completion for the paragraph that follows?
+
+
+      {{qtext}}
+
+
+      |||
+
+
+      {% for answer in answers if answer["aid"]==ra -%}
+
+      {{answer["atext"]}}
+
+      {%- endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: multiple_choice_a_and_q_en
+    reference: Pose a multi-choice question presenting the answers first
diff --git a/promptsource/templates/health_fact/templates.yaml b/promptsource/templates/health_fact/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f5ffe38500b34c633e27d4cb49871d17348beb8d
--- /dev/null
+++ b/promptsource/templates/health_fact/templates.yaml
@@ -0,0 +1,93 @@
+dataset: health_fact
+templates:
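+# label indexes the four PUBHEALTH verdicts in the order 0: false,
+# 1: mixture, 2: true, 3: unproven; the inline lists in the templates
+# below (e.g. ["do not believe", ...][label]) follow that order.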
+  041ffdd5-88b0-41df-9e22-8c7a7bc0ce24: !Template
+    answer_choices: null
+    id: 041ffdd5-88b0-41df-9e22-8c7a7bc0ce24
+    jinja: "After reading:\n\n {{main_text }}\n\nI  {{[\"do not believe\", \"do not\
+      \ think it is completely true that\", \"believe\", \"do not think it has been\
+      \ proven yet that\"][label]}}  :\n\n{{claim}}\n\nis true because of: |||\n\n\
+      {{explanation}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: main_explanation_claim2
+    reference: ''
+  23a2ef7f-2032-4771-bf39-94b840aee763: !Template
+    answer_choices: definitely false ||| likely false ||| definitely true ||| not
+      proven
+    id: 23a2ef7f-2032-4771-bf39-94b840aee763
+    jinja: "After reading:\n\n {{main_text }}\n\nI believe :\n\n{{claim}}\n\nis \n\
+      |||\n\n{{answer_choices[label]}} ."
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: main_explanation_claim5
+    reference: ''
+  277f3961-5e9b-4cd5-a13c-f822f6541c76: !Template
+    answer_choices: False ||| Mixture ||| True ||| Unproven
+    id: 277f3961-5e9b-4cd5-a13c-f822f6541c76
+    jinja: "The claim is: \n\n{{claim}} and the explanation is {{explanation}}. From\
+      \ above this is |||\n\n {{answer_choices[label]}} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: claim_explanation
+    reference: Read the claim and rely on explanation and main text
+  5000d89e-a93e-4b96-9ad4-b93924e1066b: !Template
+    answer_choices: null
+    id: 5000d89e-a93e-4b96-9ad4-b93924e1066b
+    jinja: 'I  {{["could not conclude", "could not say for sure", "could conclude",
+      "do not think it has been proven yet"][label]}} :
+
+
+      {{claim}}
+
+
+      from:
+
+
+      {{main_text}}
+
+
+      because of: |||
+
+
+      {{explanation}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: main_explanation_claim3
+    reference: ''
+  6ca1299e-216d-4111-bfb8-a4de2c2bee36: !Template
+    answer_choices: cannot ||| cannot ||| can ||| cannot
+    id: 6ca1299e-216d-4111-bfb8-a4de2c2bee36
+    jinja: "Given the:\n\n{{main_text}}\n\nand the explanation: \n\n{{explanation}}.\
+      \ \n\nThe claim is {{claim}}. This \n\n|||\n\n {{answer_choices[label]}}  be\
+      \ true."
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: main_explanation_claim4
+    reference: It is only true if the entire claim is true
+  a7a041a1-07cf-4787-b19a-ae04e270dd25: !Template
+    answer_choices: definitely false ||| likely false ||| true ||| not proven
+    id: a7a041a1-07cf-4787-b19a-ae04e270dd25
+    jinja: "After reading:\n\n {{main_text }}\n\nI believe :\n\n{{claim}} is\n\n|||\n\
+      \n{{answer_choices[label]}} ."
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: main_explanation_claim
+    reference: ''
diff --git a/promptsource/templates/hellaswag/templates.yaml b/promptsource/templates/hellaswag/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c3d90483b58c490843251f63e9ea150a76581736
--- /dev/null
+++ b/promptsource/templates/hellaswag/templates.yaml
@@ -0,0 +1,240 @@
+dataset: hellaswag
+templates:
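+# In HellaSwag, label is a string ("0".."3") indexing endings, hence the
+# recurring "label | int()" casts. The yes/no templates draw a random
+# candidate with {% set instance = [0, 1, 2, 3] | choice %} and compare it
+# to label via "instance | string()".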
+  00caa8cb-7f67-43bc-9e90-fc1d5d329432: !Template
+    answer_choices: '{{endings | join(" ||| ")}}'
+    id: 00caa8cb-7f67-43bc-9e90-fc1d5d329432
+    jinja: 'Complete the description with an appropriate ending:
+
+      First, {{ ctx_a.lower() }} Then, {{ ctx_b.lower() }} ...
+
+
+      (a) {{ answer_choices[0] }}
+
+
+      (b) {{ answer_choices[1] }}
+
+
+      (c) {{ answer_choices[2] }}
+
+
+      (d) {{ answer_choices[3] }}
+
+      |||
+
+      {{ answer_choices[label | int()] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: complete_first_then
+    reference: ''
+  196a016d-bd25-4387-90b9-53197fd43b1e: !Template
+    answer_choices: null
+    id: 196a016d-bd25-4387-90b9-53197fd43b1e
+    jinja: "What is the topic of the sentence: {{ctx}} \n|||\n{{activity_label}} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Topic without the ending answer
+    reference: Generate the topic given only the starting sentence
+  1fd44f45-d0e6-41ad-a01f-737f4c53645b: !Template
+    answer_choices: '{{endings | join(" ||| ")}}'
+    id: 1fd44f45-d0e6-41ad-a01f-737f4c53645b
+    jinja: 'Complete the sentence: {{ctx}}
+
+      |||
+
+      {{answer_choices[label | int()]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Open-ended completion
+    reference: Template for open-ended common sense completion
+  4ebf22a1-2d23-426c-a083-b43fc8567687: !Template
+    answer_choices: null
+    id: 4ebf22a1-2d23-426c-a083-b43fc8567687
+    jinja: "{{ctx}} {{endings[label | int()]}}\nCan you identify the topic of the\
+      \ paragraph? \n|||\n{{activity_label}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Topic of the context
+    reference: List the activity label as the topic of the paragraph
+  52fbd075-46cb-49fb-a41b-00a0f4a60285: !Template
+    answer_choices: '{{endings | join(" ||| ") }}'
+    id: 52fbd075-46cb-49fb-a41b-00a0f4a60285
+    jinja: '{% set prompts = [
+
+      ''Can you pick the correct ending for the sentence: '',
+
+      ''The task is to generate the ending for the sentence: '',
+
+      ''How does this sentence end? '',
+
+      ''From the list of endings described below, what ending makes the most sense
+      for the sentence '',]
+
+      %}
+
+      {{prompts | choice}}
+
+      {{ctx}}
+
+
+      (a)  {{answer_choices[0]}}
+
+
+      (b)  {{answer_choices[1]}}
+
+
+      (c)  {{answer_choices[2]}}
+
+
+      (d)  {{answer_choices[3]}}
+
+      |||
+
+      {{answer_choices [label | int()]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Randomized prompts template
+    reference: Original task template with a randomized prompt
+  663470b8-3fab-449c-84ab-6c4738da51b3: !Template
+    answer_choices: Yes ||| No
+    id: 663470b8-3fab-449c-84ab-6c4738da51b3
+    jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the following description:\
+      \ {{ ctx_a }}\nIs the following an appropriate continuation?\n{{ ctx_b }} {{\
+      \ endings[instance] }}\nYes or No?\n||| \n{% if label  == instance | string()\
+      \ %}\n{{answer_choices[0]}}\n{% else %} \n{{answer_choices[1]}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Appropriate continuation - Yes or No
+    reference: 'The template checks whether a randomly chosen ending is an appropriate continuation.'
+  6e9f8d7d-9016-45bc-b997-c42aeb2dc944: !Template
+    answer_choices: '{{endings | join("|||")}}'
+    id: 6e9f8d7d-9016-45bc-b997-c42aeb2dc944
+    jinja: 'How does this sentence end?
+
+      {{ctx}}
+
+
+      (a)  {{answer_choices[0]}}
+
+
+      (b)  {{answer_choices[1]}}
+
+
+      (c)  {{answer_choices[2]}}
+
+
+      (d)  {{answer_choices[3]}}
+
+
+      Hint: the topic of the sentence is {{activity_label}}
+
+      |||
+
+      {{answer_choices [label | int()]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Predict ending with hint
+    reference: Predict the ending with the activity label as the hint
+  a8ab00ee-78ad-465b-bbf0-9cd3d242dd7e: !Template
+    answer_choices: null
+    id: a8ab00ee-78ad-465b-bbf0-9cd3d242dd7e
+    jinja: 'How would you start the sentence:
+
+      {{endings[label | int()]}}
+
+      |||
+
+      {{ctx}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Open-ended start
+    reference: Template asks the model to generate a premise or start for the ending
+  d95b81c4-5db7-44c1-926e-c7222c896a32: !Template
+    answer_choices: Yes ||| No
+    id: d95b81c4-5db7-44c1-926e-c7222c896a32
+    jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the following text:\
+      \ {{ ctx_b }} {{ endings[instance] }}\nIs it an appropriate continuation of\
+      \ the following text: \n{{ ctx_a }} ?\nYes or No?\n||| \n{% if label  == instance\
+      \ | string() %}\n{{answer_choices[0]}}\n{% else %} \n{{answer_choices[1]}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Reversed appropriate continuation - Yes or No
+    reference: The template randomly selects a continuation and checks if the continuation
+      is appropriate for the given premise.
+  db8b1c25-f2db-4604-b8fc-f93d69d6fae7: !Template
+    answer_choices: Ending 1 ||| Ending 2 ||| Ending 3 ||| Ending 4
+    id: db8b1c25-f2db-4604-b8fc-f93d69d6fae7
+    jinja: '{{ ctx }}...
+
+      How does the description likely end?
+
+
+      Ending 1: {{ endings[0] }}
+
+
+      Ending 2: {{ endings[1] }}
+
+
+      Ending 3: {{ endings[2] }}
+
+
+      Ending 4: {{ endings[3] }}
+
+      ||| {{ answer_choices[label | int()] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: how_ends
+    reference: ''
+  def77598-682f-48de-b187-55db4d390f07: !Template
+    answer_choices: Ending 1 ||| Ending 2 ||| Ending 3 ||| Ending 4
+    id: def77598-682f-48de-b187-55db4d390f07
+    jinja: "If a description of a situation begins like this: {{ ctx }}... Then how\n\
+      does it continue? \n\nEnding 1: {{ endings[0] }}\n\nEnding 2: {{ endings[1]\
+      \ }}\n\nEnding 3: {{ endings[2] }}\n\nEnding 4: {{ endings[3] }}\n|||{{answer_choices[label\
+      \ | int()] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: if_begins_how_continues
+    reference: Returns an "Ending <int>" string in the template (a paraphrase of
+      the how_ends template)
diff --git a/promptsource/templates/hlgd/templates.yaml b/promptsource/templates/hlgd/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5e19a885fc40a09a29487826d71ca61c90481aa7
--- /dev/null
+++ b/promptsource/templates/hlgd/templates.yaml
@@ -0,0 +1,83 @@
+dataset: hlgd
+templates:
+  147ad380-5ce4-4900-b5ec-f01a63bb3653: !Template
+    answer_choices: null
+    id: 147ad380-5ce4-4900-b5ec-f01a63bb3653
+    jinja: "Do the following headlines talk about the same event ? \n{{headline_a}}\n\
+      {{headline_b}}\n|||\n{% if label %}\nYes \n{% else %}\nNo \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_1
+    reference: ''
+  36154717-e2bc-4414-bfe6-8ed70ebf292d: !Template
+    answer_choices: null
+    id: 36154717-e2bc-4414-bfe6-8ed70ebf292d
+    jinja: "{% if label %}\nGiven the headline : {{headline_a}}, what would be another\
+      \ headline for the same event ? \n|||\n{{headline_b}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_2
+    reference: ''
+  3c655244-779c-4a34-9ab0-722bcdc8567b: !Template
+    answer_choices: null
+    id: 3c655244-779c-4a34-9ab0-722bcdc8567b
+    jinja: "Which one of the following choices \"same event\" or \"different event\"\
+      \ best describe the relation between these two pieces of news \n{{headline_a}}\n\
+      {{headline_b}}\n|||\n{% if label %}\nsame event\n{% else %}\ndifferent event\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_6
+    reference: ''
+  4d97b99c-e438-403e-a864-ffbbc193bf63: !Template
+    answer_choices: null
+    id: 4d97b99c-e438-403e-a864-ffbbc193bf63
+    jinja: "Given the headlines : \"{{headline_a}}\" and  \"{{headline_b}}\",  do\
+      \ they talk about the same event ? \n|||\n{% if label %}\nYes \n{% else %}\n\
+      No \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_5
+    reference: ''
+  7588a4a1-bff3-4543-aaa3-e3b679b6b97b: !Template
+    answer_choices: null
+    id: 7588a4a1-bff3-4543-aaa3-e3b679b6b97b
+    jinja: "Complete the sentence: both \"{{headline_a}}\" and \"{{headline_b}}\"\
+      \ discuss \n|||\n{{[\n  \"different events\",\n  \"the same event\"\n][label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_4
+    reference: ''
+  7a544e3a-0d2e-45c8-bb15-894f22eeab59: !Template
+    answer_choices: null
+    id: 7a544e3a-0d2e-45c8-bb15-894f22eeab59
+    jinja: "Which year of the following:  {% for n in range(10, 20) %} 20{{n}}, {%\
+      \ endfor %} was this headline published ? \n{{headline_a}}\n|||\n{{date_a.split('-')[0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_7
+    reference: ''
+  ff3d165e-3c52-42f4-96e0-df125605a379: !Template
+    answer_choices: null
+    id: ff3d165e-3c52-42f4-96e0-df125605a379
+    jinja: "Does the headline \"{{headline_a}}\" published on {{date_a}} talk about\
+      \ the same event as \"{{headline_b}}\" published on {{date_b}} ? \n|||\n{% if\
+      \ label %}\nYes \n{% else %}\nNo \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hlgd_3
+    reference: ''
diff --git a/promptsource/templates/hotpot_qa/distractor/templates.yaml b/promptsource/templates/hotpot_qa/distractor/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2cea78588d5fd8a38131336c8ccc5975377ec01
--- /dev/null
+++ b/promptsource/templates/hotpot_qa/distractor/templates.yaml
@@ -0,0 +1,86 @@
+dataset: hotpot_qa
+subset: distractor
+templates:
+  20242fae-2b56-43db-ae50-734c5ca10c5c: !Template
+    answer_choices: null
+    id: 20242fae-2b56-43db-ae50-734c5ca10c5c
+    jinja: "Information:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\nQuestion: {{question}}\n\nAnswer: {{answer}}\n\nTask: Select\
+      \ sentences from the paragraphs in Information that explain the answer.\n|||\n\
+      Explanations:\n{% for paragraph in supporting_facts.title%}\n{% set outer_loop\
+      \ = loop %}\n{% for title in context.title%}\n{% if title==paragraph %}\n- {{\
+      \ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+      \ }}\n{% endif %}\n{% endfor %}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generate Explanations
+    reference: 'Given information, question, and its answer, list the sentences from
+      information that explain the answer '
+  6e33c684-725d-49a2-8da3-f9d0b2bb60a0: !Template
+    answer_choices: null
+    id: 6e33c684-725d-49a2-8da3-f9d0b2bb60a0
+    jinja: "Information:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\nWhat is the question that begets the answer of \"{{answer}}\"\
+      ?\n||| \n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generate Question
+    reference: Given information and answer, generate question.
+  9aab7543-e491-403f-a77b-63a57ef3316f: !Template
+    answer_choices: null
+    id: 9aab7543-e491-403f-a77b-63a57ef3316f
+    jinja: "{{question}} \n\nAnswer the question and give explanations, using the\
+      \ paragraphs below. \n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\n|||\nAnswer: {{answer}}\n\nExplanations:\n{% for paragraph\
+      \ in supporting_facts.title%}\n{% set outer_loop = loop %}\n{% for title in\
+      \ context.title%}\n{% if title==paragraph %}\n- {{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+      \ }}\n{% endif %}\n{% endfor %}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Generate Answer and Explanations
+    reference: Given information and question, answer it and list the sentences from
+      information that explain the answer.
+  c80dce20-70c3-4e5e-b792-ed000d035215: !Template
+    answer_choices: null
+    id: c80dce20-70c3-4e5e-b792-ed000d035215
+    jinja: "Generate titles (in the format of \"paragraph : title\") for each of the\
+      \ paragraphs below:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\n||| \n{% for sents in context.sentences %}\n  - {{sents\
+      \ | join(\"\")}} : {{context.title[loop.index0]}}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: 'Generate Title #2'
+    reference: 'Given a list of paragraphs, generate titles for each of them in
+      the format "paragraph : title".'
+  ea62fe03-8871-4322-8b5c-c060f8d41923: !Template
+    answer_choices: null
+    id: ea62fe03-8871-4322-8b5c-c060f8d41923
+    jinja: "Generate titles (separated by semi-colons) for each of the paragraphs\
+      \ below:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\")}}\n\
+      {% endfor %}\n||| \n{{context.title | join(\"; \")}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: 'Generate Title #1'
+    reference: Given a list of paragraphs, generate a string of titles (separated
+      by semi-colons) for each of them.
+  f14adb21-34ba-4641-b9ce-dfbd0ae9744c: !Template
+    answer_choices: null
+    id: f14adb21-34ba-4641-b9ce-dfbd0ae9744c
+    jinja: "Information:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\nQuestion: {{question}}\n||| \nAnswer: {{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generate Answer
+    reference: Given information and question, generate answer.
diff --git a/promptsource/templates/hotpot_qa/fullwiki/templates.yaml b/promptsource/templates/hotpot_qa/fullwiki/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ebdb8a6ea0e4c97e0da5d8ad40c2999a45e08c95
--- /dev/null
+++ b/promptsource/templates/hotpot_qa/fullwiki/templates.yaml
@@ -0,0 +1,100 @@
+dataset: hotpot_qa
+subset: fullwiki
+templates:
+  287a9cf1-2c45-4e05-a596-8d03221275f8: !Template
+    answer_choices: null
+    id: 287a9cf1-2c45-4e05-a596-8d03221275f8
+    jinja: "{{question}} \n\nAnswer the question and give explanations, using the\
+      \ paragraphs below. \n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\n|||\nAnswer: {{answer}}\n\nExplanations:\n{% for paragraph\
+      \ in supporting_facts.title%}\n{% set outer_loop = loop %}\n{% for title in\
+      \ context.title%}\n{% if title==paragraph %}\n- {{ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+      \ }}\n{% endif %}\n{% endfor %}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Generate Answer and Explanations
+    reference: Given information and question, answer it and list the sentences from
+      information that explain the answer.
+  3c9260ef-ca10-40fc-b775-39bae9d28ae5: !Template
+    answer_choices: null
+    id: 3c9260ef-ca10-40fc-b775-39bae9d28ae5
+    jinja: "Generate titles (in the format of \"paragraph : title\") for each of the\
+      \ paragraphs below:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\n||| \n{% for sents in context.sentences %}\n  - {{sents\
+      \ | join(\"\")}} : {{context.title[loop.index0]}}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: 'Generate Title #2'
+    reference: 'Given a list of paragraphs, generate titles for each of them in
+      the format "paragraph : title".'
+  43e2a527-a0b1-498f-ac1c-e0b3c272603d: !Template
+    answer_choices: null
+    id: 43e2a527-a0b1-498f-ac1c-e0b3c272603d
+    jinja: "Information:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\nWhat is the question that begets the answer of \"{{answer}}\"\
+      ?\n||| \n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate Question
+    reference: Given information and answer, generate question.
+  8b7b3f27-c235-4a1c-907d-3f37e5f94d93: !Template
+    answer_choices: null
+    id: 8b7b3f27-c235-4a1c-907d-3f37e5f94d93
+    jinja: 'What is the type of the question "{{question}}" Comparison or bridge?
+
+      |||
+
+      {{type}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Classify Question Type
+    reference: Given question, classify its type.
+  970e537a-5295-4712-a3a2-d31ca11d1695: !Template
+    answer_choices: null
+    id: 970e537a-5295-4712-a3a2-d31ca11d1695
+    jinja: "Information:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\nQuestion: {{question}}\n\nAnswer: {{answer}}\n\nTask: Select\
+      \ sentences from the paragraphs in Information that explain the answer.\n|||\n\
+      Explanations:\n{% for paragraph in supporting_facts.title%}\n{% set outer_loop\
+      \ = loop %}\n{% for title in context.title%}\n{% if title==paragraph %}\n- {{\
+      \ context.sentences[loop.index0][supporting_facts.sent_id[outer_loop.index0]]\
+      \ }}\n{% endif %}\n{% endfor %}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate Explanations
+    reference: 'Given information, question, and its answer, list the sentences from
+      information that explain the answer '
+  e1aadf60-85b4-4c1b-9803-c5231e71e74d: !Template
+    answer_choices: null
+    id: e1aadf60-85b4-4c1b-9803-c5231e71e74d
+    jinja: "Information:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\
+      \")}}\n{% endfor %}\nQuestion: {{question}}\n||| \nAnswer: {{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate Answer
+    reference: Given information and question, generate answer.
+  e20171e3-8965-4878-9014-0b72c84e9fec: !Template
+    answer_choices: null
+    id: e20171e3-8965-4878-9014-0b72c84e9fec
+    jinja: "Generate titles (separated by semi-colons) for each of the paragraphs\
+      \ below:\n{% for sents in context.sentences %}\n  - {{sents | join(\"\")}}\n\
+      {% endfor %}\n||| \n{{context.title | join(\"; \")}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: 'Generate Title #1'
+    reference: Given a list of paragraphs, generate a string of titles (separated
+      by semi-colons) for each of them.
diff --git a/promptsource/templates/humicroedit/subtask-1/templates.yaml b/promptsource/templates/humicroedit/subtask-1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dcb91aa09370d7859287f4cf16844e5a1a700114
--- /dev/null
+++ b/promptsource/templates/humicroedit/subtask-1/templates.yaml
@@ -0,0 +1,298 @@
+dataset: humicroedit
+subset: subtask-1
+templates:
+  27c7a53c-d5b8-410d-affc-95ff59a89c03: !Template
+    answer_choices: null
+    id: 27c7a53c-d5b8-410d-affc-95ff59a89c03
+    jinja: 'Please rate how funny the sentence is from {{"0.0"}} to {{"3.0"}}.
+
+      {{ original.replace(original[original.index("<"):original.index(">")+1], edit)
+      }}
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sent_rate
+    reference: ''
+  2d0e8f25-5680-4079-9a59-7b06329bd65a: !Template
+    answer_choices: null
+    id: 2d0e8f25-5680-4079-9a59-7b06329bd65a
+    jinja: 'Please rate how funny it is to replace "{{ original[original.index("<")+1:original.index("/>")]
+      }}" with "{{ edit }}" in the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }} " from {{"0.0"}} to
+      {{"3.0"}}.
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_word_rate
+    reference: ''
+  43af1016-6d25-434d-b9b0-893706cda5d6: !Template
+    answer_choices: null
+    id: 43af1016-6d25-434d-b9b0-893706cda5d6
+    jinja: 'Please give a score denoting the funniness of the following sentence.
+
+      {{ original.replace(original[original.index("<"):original.index(">")+1], edit)
+      }}
+
+      Your score should be something like {{"1.5"}}, where {{"0.0 means not funny,
+      1.0 means slightly funny, 2.0 means moderately funny and 3.0 means funny"}}.
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sent_examples
+    reference: ''
+  692750f4-b4a2-4344-bc4d-e05daef47c25: !Template
+    answer_choices: null
+    id: 692750f4-b4a2-4344-bc4d-e05daef47c25
+    jinja: 'I need to know how funny it is to replace "{{ original[original.index("<")+1:original.index("/>")]
+      }}" with "{{ edit }}" in the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }} ".
+
+      Question: Can you give me a number from {{"0.0 to 3.0"}} that denotes how funny
+      it is, where {{"0.0"}} means not funny and {{"3.0"}} means funny?
+
+      Answer:
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_word_funniness
+    reference: ''
+  6c6c7354-fcd5-4b0d-8672-671c639c25f5: !Template
+    answer_choices: null
+    id: 6c6c7354-fcd5-4b0d-8672-671c639c25f5
+    jinja: 'I need to know how funny the edited sentence is compared to the original
+      sentence:
+
+      Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }}
+
+      Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      edit) }}
+
+      Question: Can you give me a number from {{"0.0 to 3.0"}} that denotes how funny
+      it is, where {{"0.0"}} means not funny and {{"3.0"}} means funny?
+
+      Answer:
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sent_funniness
+    reference: ''
+  759a11e7-5933-41a1-b803-f352eb385d28: !Template
+    answer_choices: null
+    id: 759a11e7-5933-41a1-b803-f352eb385d28
+    jinja: 'Please give a score denoting the funniness of replacing "{{ original[original.index("<")+1:original.index("/>")]
+      }}" with "{{ edit }}" in the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }} ".
+
+      Your score should be something like {{"1.5"}}, where {{"0.0 means not funny,
+      1.0 means slightly funny, 2.0 means moderately funny and 3.0 means funny"}}.
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_word_examples
+    reference: ''
+  8ae3f3c4-deb2-4a82-8a50-5f726b781e2a: !Template
+    answer_choices: null
+    id: 8ae3f3c4-deb2-4a82-8a50-5f726b781e2a
+    jinja: 'Please rate how funny the edited sentence is from {{"0.0"}} to {{"3.0"}}
+      compared to the original sentence.
+
+      Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }}
+
+      Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      edit) }}
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sent_rate
+    reference: ''
+  90ac629a-f670-4c43-bbf8-a9ef9021c0b3: !Template
+    answer_choices: null
+    id: 90ac629a-f670-4c43-bbf8-a9ef9021c0b3
+    jinja: "I need to assign a score from {{\"0.0 to 3.0\"}} that denotes how funny\
+      \ it is to replace \"{{ original[original.index(\"<\")+1:original.index(\"/>\"\
+      )] }}\" with \"{{ edit }}\" in the sentence \"{{ original.replace(original[original.index(\"\
+      <\"):original.index(\">\")+1], original[original.index(\"<\")+1:original.index(\"\
+      />\")]) }} \". \nWhat score should I assign?\n||| \n{{ (((5 * meanGrade) | round)\
+      \ / 5) }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_word_score
+    reference: ''
+  93bd417d-a17f-460b-800c-5881ce752d98: !Template
+    answer_choices: null
+    id: 93bd417d-a17f-460b-800c-5881ce752d98
+    jinja: 'Rate on a scale from {{"0.0"}} to {{"3.0"}} how funny the sentence "{{
+      original.replace(original[original.index("<"):original.index(">")+1], edit)
+      }}" is.
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sent_rank
+    reference: ''
+  a08cab27-06fb-4c96-b6b1-eb0533fe9b25: !Template
+    answer_choices: null
+    id: a08cab27-06fb-4c96-b6b1-eb0533fe9b25
+    jinja: 'Please give a score denoting the funniness of the following edited sentence
+      compared to the original sentence.
+
+      Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }}
+
+      Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      edit) }}
+
+      Your score should be something like {{"1.5"}}, where {{"0.0 means not funny,
+      1.0 means slightly funny, 2.0 means moderately funny and 3.0 means funny"}}.
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sent_examples
+    reference: ''
+  ac6a9fa1-0f23-4ee9-9bec-c6f9f8daf7a9: !Template
+    answer_choices: null
+    id: ac6a9fa1-0f23-4ee9-9bec-c6f9f8daf7a9
+    jinja: 'I need to assign a score from {{"0.0 to 3.0"}} that denotes how funny
+      the following edited sentence is compared to the original sentence:
+
+      Original: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }}
+
+      Edited: {{ original.replace(original[original.index("<"):original.index(">")+1],
+      edit) }}
+
+      What score should I assign?
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sent_score
+    reference: ''
+  c1511cfc-8ba8-4d10-98b7-1e576cf02588: !Template
+    answer_choices: null
+    id: c1511cfc-8ba8-4d10-98b7-1e576cf02588
+    jinja: 'I need to assign a score from {{"0.0 to 3.0"}} that denotes how funny
+      the following sentence is:
+
+      {{ original.replace(original[original.index("<"):original.index(">")+1], edit)
+      }}
+
+      What score should I assign?
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sent_score
+    reference: ''
+  c53bbbcb-6bbb-4279-bd55-e3c4f7baa828: !Template
+    answer_choices: null
+    id: c53bbbcb-6bbb-4279-bd55-e3c4f7baa828
+    jinja: 'Rate on a scale from {{"0.0"}} to {{"3.0"}} how funny the edited sentence
+      "{{ original.replace(original[original.index("<"):original.index(">")+1], edit)
+      }}" is compared to the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }}".
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sent_rank
+    reference: ''
+  ce115e3d-f63c-4030-8c13-bd77721ef0f5: !Template
+    answer_choices: null
+    id: ce115e3d-f63c-4030-8c13-bd77721ef0f5
+    jinja: 'I need to know how funny the sentence is:
+
+      {{ original.replace(original[original.index("<"):original.index(">")+1], edit)
+      }}
+
+      Question: Can you give me a number from {{"0.0 to 3.0"}} that denotes how funny
+      it is, where {{"0.0"}} means not funny and {{"3.0"}} means funny?
+
+      Answer:
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sent_funniness
+    reference: ''
+  fee13ce1-e03e-4dd2-9d7d-08c8fd8b74c7: !Template
+    answer_choices: null
+    id: fee13ce1-e03e-4dd2-9d7d-08c8fd8b74c7
+    jinja: 'Rate on a scale from {{"0.0"}} to {{"3.0"}} how funny it is to replace
+      "{{ original[original.index("<")+1:original.index("/>")] }}" with "{{ edit }}"
+      in the sentence "{{ original.replace(original[original.index("<"):original.index(">")+1],
+      original[original.index("<")+1:original.index("/>")]) }} ".
+
+      |||
+
+      {{ (((5 * meanGrade) | round) / 5) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_word_rank
+    reference: ''
diff --git a/promptsource/templates/humicroedit/subtask-2/templates.yaml b/promptsource/templates/humicroedit/subtask-2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..841769da3f6129f3351812278e4cf3fa341b3b13
--- /dev/null
+++ b/promptsource/templates/humicroedit/subtask-2/templates.yaml
@@ -0,0 +1,222 @@
+dataset: humicroedit
+subset: subtask-2
+templates:
+  437942d0-f1e0-4b17-83d0-01b19e54ec51: !Template
+    answer_choices: C ||| A ||| B
+    id: 437942d0-f1e0-4b17-83d0-01b19e54ec51
+    jinja: 'Given an original sentence "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      original1[original1.index("<")+1:original1.index("/>")]) }}", is it more humorous
+      to replace "{{ original1[original1.index("<")+1:original1.index("/>")] }}" with
+      "{{ edit1 }}", or to replace "{{ original2[original2.index("<")+1:original2.index("/>")]
+      }}" with "{{ edit2 }}", or are both equally humorous?
+
+      A. replace "{{ original1[original1.index("<")+1:original1.index("/>")] }}" with
+      "{{ edit1 }}"
+
+      B. replace "{{ original2[original2.index("<")+1:original2.index("/>")] }}" with
+      "{{ edit2 }}"
+
+      C. both equally humorous
+
+      The answer is
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_words_qa_id
+    reference: ''
+  49c71a8a-97af-465c-af04-36f08884e568: !Template
+    answer_choices: C ||| A ||| B
+    id: 49c71a8a-97af-465c-af04-36f08884e568
+    jinja: "Original sentence: {{ original1.replace(original1[original1.index(\"<\"\
+      ):original1.index(\">\")+1], original1[original1.index(\"<\")+1:original1.index(\"\
+      />\")]) }}.\nEdited sentence A: {{ original1.replace(original1[original1.index(\"\
+      <\"):original1.index(\">\")+1], edit1) }}.\nEdited sentence B: {{ original2.replace(original2[original2.index(\"\
+      <\"):original2.index(\">\")+1], edit2) }}.\nThere are two edited sentences based\
+      \ on the original sentence. Which is more humorous, or are they equally humorous? \nA.\
+      \ Edited sentence A\nB. Edited sentence B\nC. Equal\nThe answer is\n|||\n{{\
+      \ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sentences_qa_id
+    reference: ''
+  67dc6d7e-dcbc-4444-9cde-6a8f8a0d2aa4: !Template
+    answer_choices: null
+    id: 67dc6d7e-dcbc-4444-9cde-6a8f8a0d2aa4
+    jinja: 'Which of the following sentences is more humorous? If they are equally
+      humorous, please answer "equal".
+
+      - {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      edit1) }}
+
+      - {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+      edit2) }}
+
+      |||
+
+      {{ ["equal", original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      edit1), original2.replace(original2[original2.index("<"):original2.index(">")+1],
+      edit2)][label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sentences_text
+    reference: ''
+  6d576e77-df98-47cd-b92e-c87a56190be4: !Template
+    answer_choices: C ||| A ||| B
+    id: 6d576e77-df98-47cd-b92e-c87a56190be4
+    jinja: 'Given an original sentence "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      original1[original1.index("<")+1:original1.index("/>")]) }}", we have two replacement
+      strategies. The first is to replace "{{ original1[original1.index("<")+1:original1.index("/>")]
+      }}" with "{{ edit1 }}", and the second is to replace "{{ original2[original2.index("<")+1:original2.index("/>")]
+      }}" with "{{ edit2 }}".
+
+      Which strategy is more humorous, or are they equally humorous?
+
+      A. The first strategy
+
+      B. The second strategy
+
+      C. Equal
+
+      The answer is
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_words_qa_strategy_id
+    reference: ''
+  794ee65f-df0a-4448-8eac-20f757a8918d: !Template
+    answer_choices: C ||| A ||| B
+    id: 794ee65f-df0a-4448-8eac-20f757a8918d
+    jinja: 'There are two sentences:
+
+      Sentence1: {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      edit1) }}
+
+      Sentence2: {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+      edit2) }}
+
+      Which sentence is more humorous?
+
+      A. Sentence 1
+
+      B. Sentence 2
+
+      C. Equal
+
+      The answer is
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sentences_QA_id
+    reference: ''
+  88054771-74d2-481f-91f1-c078a2bda5b9: !Template
+    answer_choices: equal ||| A ||| B
+    id: 88054771-74d2-481f-91f1-c078a2bda5b9
+    jinja: 'Which of the following sentences is more humorous? If they are equally
+      humorous, please answer "equal".
+
+      A. {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      edit1) }}
+
+      B. {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+      edit2) }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sentences_id
+    reference: ''
+  8e5f09ae-27bc-4b34-b20e-6bc6672a2c1a: !Template
+    answer_choices: Equal ||| Sentence 1 ||| Sentence 2
+    id: 8e5f09ae-27bc-4b34-b20e-6bc6672a2c1a
+    jinja: 'There are two sentences:
+
+      Sentence1: {{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      edit1) }}
+
+      Sentence2: {{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+      edit2) }}
+
+      Which sentence is more humorous?
+
+      - Sentence 1
+
+      - Sentence 2
+
+      - Equal
+
+      The answer is
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: only_edited_sentences_QA_text
+    reference: ''
+  b9e3fe90-d328-44a8-bb6e-212f600a2050: !Template
+    answer_choices: Equal ||| First ||| Second
+    id: b9e3fe90-d328-44a8-bb6e-212f600a2050
+    jinja: 'Given an original sentence "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      original1[original1.index("<")+1:original1.index("/>")]) }}", we have two replacement
+      strategies:
+
+      The first is to replace "{{ original1[original1.index("<")+1:original1.index("/>")]
+      }}" with "{{ edit1 }}".
+
+      The second is to replace "{{ original2[original2.index("<")+1:original2.index("/>")]
+      }}" with "{{ edit2 }}".
+
+      Which strategy is more humorous, or are they equally humorous?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edit_words_qa_strategy
+    reference: ''
+  ec92a63f-7d82-48f0-a9e4-8e99dd5a0bb0: !Template
+    answer_choices: Equal ||| First ||| Second
+    id: ec92a63f-7d82-48f0-a9e4-8e99dd5a0bb0
+    jinja: 'Given an original sentence "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      original1[original1.index("<")+1:original1.index("/>")]) }}", we have two edited
+      sentences. The first is "{{ original1.replace(original1[original1.index("<"):original1.index(">")+1],
+      edit1) }}", and the second is "{{ original2.replace(original2[original2.index("<"):original2.index(">")+1],
+      edit2) }}". Which edited sentence is more humorous or equally humorous?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: original_sent_edited_sentences_qa
+    reference: ''
diff --git a/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml b/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3957389f880799d6d298c6dcbecf11894486ff5
--- /dev/null
+++ b/promptsource/templates/hyperpartisan_news_detection/byarticle/templates.yaml
@@ -0,0 +1,151 @@
+dataset: hyperpartisan_news_detection
+subset: byarticle
+templates:
+  06fbb182-0077-4355-b7cc-b4d0d2e98c08: !Template
+    answer_choices: null
+    id: 06fbb182-0077-4355-b7cc-b4d0d2e98c08
+    jinja: 'Consider this news article text:
+
+
+      "{{text}}"
+
+
+      Does it follow hyperpartisan argumentation? {{"True"}} or {{"False"}}?
+
+
+      |||
+
+
+      {{ hyperpartisan }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: ha
+    reference: ''
+  0e988588-f3f1-4c70-80ce-dc5660cde6dc: !Template
+    answer_choices: null
+    id: 0e988588-f3f1-4c70-80ce-dc5660cde6dc
+    jinja: '"{{text}}"
+
+
+      The news article text above follows hyperpartisan argumentation. {{"True"}}
+      or {{"False"}}?
+
+
+      |||
+
+
+      {{hyperpartisan}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: ha_2
+    reference: ''
+  545567e7-97cf-4600-bf1e-94f213d2f0a4: !Template
+    answer_choices: null
+    id: 545567e7-97cf-4600-bf1e-94f213d2f0a4
+    jinja: '"{{text}}"
+
+
+      We must consume the news article above with caution as it exhibits prejudiced
+      allegiance towards one group or cause. {{"True"}} or {{"False"}}?
+
+
+      |||
+
+
+      {{hyperpartisan}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: extreme
+    reference: ''
+  5a612b28-923b-4302-b959-290bf8453166: !Template
+    answer_choices: null
+    id: 5a612b28-923b-4302-b959-290bf8453166
+    jinja: '"{{text}}"
+
+
+      The news article above takes an extreme left-wing or right-wing standpoint.
+      {{"True"}} or {{"False"}}?
+
+
+      |||
+
+
+      {{hyperpartisan}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: left_right
+    reference: ''
+  8ab54a01-2728-4ac2-8ee9-79016434454f: !Template
+    answer_choices: null
+    id: 8ab54a01-2728-4ac2-8ee9-79016434454f
+    jinja: '"{{text}}"
+
+
+      The publisher of the news piece above has reported strongly in favor of one
+      political side and seems to have ignored the other side. {{"True"}} or {{"False"}}?
+
+
+      |||
+
+
+      {{hyperpartisan}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: opp
+    reference: ''
+  b95dbb9c-793c-4a58-8097-31c9f0230e5f: !Template
+    answer_choices: null
+    id: b95dbb9c-793c-4a58-8097-31c9f0230e5f
+    jinja: 'Consider this news piece:
+
+
+      "{{text}}"
+
+
+      It exhibits blind, prejudiced, or unreasoning allegiance to one party, faction,
+      cause, or person. {{"True"}} or {{"False"}}?
+
+
+      |||
+
+
+      {{hyperpartisan}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: ha_def
+    reference: ''
+  d4f7f589-995a-473e-b87b-68b9a0fea0d8: !Template
+    answer_choices: null
+    id: d4f7f589-995a-473e-b87b-68b9a0fea0d8
+    jinja: 'Consider this news piece:
+
+
+      "{{text}}"
+
+
+      It exhibits extreme one-sidedness to a single group of individuals or cause.
+      {{"True"}} or {{"False"}}?
+
+
+      |||
+
+
+      {{hyperpartisan}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: one-sidedness
+    reference: ''
diff --git a/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml b/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3e20c9174cf99fea6879848c1e29395b9fca39e
--- /dev/null
+++ b/promptsource/templates/hyperpartisan_news_detection/bypublisher/templates.yaml
@@ -0,0 +1,126 @@
+dataset: hyperpartisan_news_detection
+subset: bypublisher
+templates:
+  43db0412-e7a0-4976-8b97-6da598092ed8: !Template
+    answer_choices: right ||| right-center ||| least ||| left-center ||| left
+    id: 43db0412-e7a0-4976-8b97-6da598092ed8
+    jinja: '"{{text}}"
+
+
+      How would you describe the political leaning of the publisher who reported the
+      news piece above? Please choose one of these options:
+
+
+
+      {{"right"}}, {{"right-center"}}, {{"least"}}, {{"left-center"}}, and {{"left"}}.
+
+
+      |||
+
+
+      {{ answer_choices[bias] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: leaning
+    reference: ''
+  62fc329c-188c-43da-98b7-aa6580cdef17: !Template
+    answer_choices: right ||| right-center ||| least ||| left-center ||| left
+    id: 62fc329c-188c-43da-98b7-aa6580cdef17
+    jinja: '"{{text}}"
+
+
+      The news piece above gives the readers a biased view of political news. How
+      would you describe the bias?
+
+
+      {{"right"}}, {{"right-center"}}, {{"least"}}, {{"left-center"}}, or {{"left"}}?
+
+
+      |||
+
+
+      {{ answer_choices[bias] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: political_position_readers
+    reference: ''
+  6b26ad81-4777-4323-98de-e7956cedc1ef: !Template
+    answer_choices: right ||| right-center ||| least ||| left-center ||| left
+    id: 6b26ad81-4777-4323-98de-e7956cedc1ef
+    jinja: '"{{text}}"
+
+
+      The news piece above offers a biased view of political news. Which of the following
+      options do you agree with?
+
+
+      {{"right"}}, {{"right-center"}}, {{"least"}}, {{"left-center"}}, or {{"left"}}?
+
+
+      |||
+
+
+      {{ answer_choices[bias] }}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: political_position_readers_2
+    reference: ''
+  7512e26a-0130-438a-ac39-dde3a4274fcf: !Template
+    answer_choices: right ||| right-center ||| least ||| left-center ||| left
+    id: 7512e26a-0130-438a-ac39-dde3a4274fcf
+    jinja: '"{{text}}"
+
+
+      Based on the news piece above, which of the following options best captures
+      the publisher''s political position?
+
+
+      Options: {{"right"}}, {{"right-center"}}, {{"least"}}, {{"left-center"}}, and
+      {{"left"}}.
+
+
+      |||
+
+
+      {{ answer_choices[bias] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: political_position
+    reference: ''
+  8cc1b595-29b0-49bc-8323-73fa489d936c: !Template
+    answer_choices: right ||| right-center ||| least ||| left-center ||| left
+    id: 8cc1b595-29b0-49bc-8323-73fa489d936c
+    jinja: 'Consider this news piece:
+
+
+      "{{text}}"
+
+
+      Its publisher exhibits a political bias through their reporting of the news.
+      Which of these options do you agree with regarding this bias?
+
+
+      Options: {{"right"}}, {{"right-center"}}, {{"least"}}, {{"left-center"}}, and
+      {{"left"}}.
+
+
+      |||
+
+
+      {{ answer_choices[bias] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: political_bias
+    reference: ''
diff --git a/promptsource/templates/imdb/templates.yaml b/promptsource/templates/imdb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dd6bb684f456d222ba02a3b43cbdb1540b2a6132
--- /dev/null
+++ b/promptsource/templates/imdb/templates.yaml
@@ -0,0 +1,133 @@
+dataset: imdb
+templates:
+  02ff2949-0f45-4d97-941e-6fa4c0afbc2d: !Template
+    answer_choices: negative ||| positive
+    id: 02ff2949-0f45-4d97-941e-6fa4c0afbc2d
+    jinja: The following movie review expresses what sentiment? {{text}} ||| {{ answer_choices
+      [label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Movie Expressed Sentiment 2
+    reference: ''
+  2351d12a-e630-4d19-8b41-e199266e38f7: !Template
+    answer_choices: bad ||| good
+    id: 2351d12a-e630-4d19-8b41-e199266e38f7
+    jinja: '{{text}} Did the reviewer find this movie {{"good or bad"}}? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Opinion bad good choices
+    reference: ''
+  5f372fb1-795a-47b6-8ddf-c4fd1579e76a: !Template
+    answer_choices: negative ||| positive
+    id: 5f372fb1-795a-47b6-8ddf-c4fd1579e76a
+    jinja: "{{text}} \nIs this review {{\"positive or negative\"}}? ||| \n{{answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: 'Sentiment with choices '
+    reference: ''
+  866474a5-1498-46b7-bfee-ac0c5160707f: !Template
+    answer_choices: negative ||| positive
+    id: 866474a5-1498-46b7-bfee-ac0c5160707f
+    jinja: '{{text}} How does the viewer feel about the movie? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Sentiment Feeling
+    reference: ''
+  96538f30-f2c1-430e-8fc6-936a16966d9c: !Template
+    answer_choices: negative ||| positive
+    id: 96538f30-f2c1-430e-8fc6-936a16966d9c
+    jinja: '{{text}} What sentiment does the writer express for the movie? ||| {{
+      answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Writer Expressed Sentiment
+    reference: ''
+  af51297c-38a3-4d6c-a8b5-04b1243d7443: !Template
+    answer_choices: negative ||| positive
+    id: af51297c-38a3-4d6c-a8b5-04b1243d7443
+    jinja: '{{text}} The sentiment expressed for the movie is ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Movie Expressed Sentiment
+    reference: ''
+  b93b74ac-fe95-40b4-9610-318b46ab820f: !Template
+    answer_choices: negative ||| positive
+    id: b93b74ac-fe95-40b4-9610-318b46ab820f
+    jinja: '{{text}} What is the sentiment expressed in this text? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Text Expressed Sentiment
+    reference: ''
+  b9b5d79d-f0b3-4bec-a724-f585db3e93ff: !Template
+    answer_choices: negative ||| positive
+    id: b9b5d79d-f0b3-4bec-a724-f585db3e93ff
+    jinja: '{{text}} This is definitely not a ||| {{ answer_choices [1-label]}} review.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Negation template for positive and negative
+    reference: ''
+  bd82ba0f-01d4-4fa1-bf8d-07e392c00cd9: !Template
+    answer_choices: No ||| Yes
+    id: bd82ba0f-01d4-4fa1-bf8d-07e392c00cd9
+    jinja: '{{text}} Did the reviewer enjoy the movie? ||| {{ answer_choices [label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Enjoyment Yes No
+    reference: ''
+  c70d1687-2421-49a2-9553-91b8bac4cfbe: !Template
+    answer_choices: negative ||| positive
+    id: c70d1687-2421-49a2-9553-91b8bac4cfbe
+    jinja: '{{text}} What is the sentiment expressed by the reviewer for the movie?
+      ||| {{ answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Expressed Sentiment
+    reference: ''
+  e01970ab-42c0-4e6e-a08f-4940d889ef37: !Template
+    answer_choices: They didn't like it! ||| They loved it
+    id: e01970ab-42c0-4e6e-a08f-4940d889ef37
+    jinja: '{{text}} How does the reviewer feel about the movie? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Enjoyment
+    reference: ''
diff --git a/promptsource/templates/jfleg/templates.yaml b/promptsource/templates/jfleg/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..11ece10dbdb3597ec419964a675ce642fd13d4b9
--- /dev/null
+++ b/promptsource/templates/jfleg/templates.yaml
@@ -0,0 +1,161 @@
+dataset: jfleg
+templates:
+  18d3362c-74e1-4cda-9b16-001948d9196b: !Template
+    answer_choices: null
+    id: 18d3362c-74e1-4cda-9b16-001948d9196b
+    jinja: 'I am correcting the grammar exercises of my students. How should the
+      following sentence be rewritten?
+
+
+      {{sentence}}
+
+
+      |||
+
+
+
+      {{corrections | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v7
+    reference: ''
+  491f0d06-2acf-4977-8ac5-c20b9bd17cce: !Template
+    answer_choices: null
+    id: 491f0d06-2acf-4977-8ac5-c20b9bd17cce
+    jinja: 'The text needs to be fluent or native-sounding. The following sentence
+      is not.
+
+
+      {{sentence}}
+
+
+      However, an improved version of the same sentence is:
+
+
+      |||
+
+
+      {{ corrections | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v4
+    reference: ''
+  52a79f92-f24c-45e7-89a4-a9dc8f6e0ecf: !Template
+    answer_choices: null
+    id: 52a79f92-f24c-45e7-89a4-a9dc8f6e0ecf
+    jinja: 'A native English speaker would find the following sentence hard to understand
+      because of grammatical errors.
+
+
+      {{sentence}}
+
+
+      A correct version of the same sentence could be:
+
+
+      |||
+
+
+      {{ corrections | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v2
+    reference: ''
+  5ba0d7ab-d732-4c47-b5df-be0ae4feb1cd: !Template
+    answer_choices: null
+    id: 5ba0d7ab-d732-4c47-b5df-be0ae4feb1cd
+    jinja: 'The English language has specific rules that need to be followed. The
+      grammatical error in the following sentence:
+
+
+      {{sentence}}
+
+
+      can be corrected to:
+
+
+      |||
+
+
+      {{ corrections | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v3
+    reference: ''
+  68e68d2f-ed55-4f0e-b512-22166fd5206b: !Template
+    answer_choices: null
+    id: 68e68d2f-ed55-4f0e-b512-22166fd5206b
+    jinja: '{{sentence}}
+
+
+      According to academic writing rules, the above sentence is hard to understand
+      because it is grammatically incorrect.
+
+
+      There can be many ways of correcting the above sentence. One possible option
+      is:
+
+
+      |||
+
+
+      {{ corrections | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v5
+    reference: ''
+  8461a024-9a16-4aef-94a9-d69c8f12a590: !Template
+    answer_choices: null
+    id: 8461a024-9a16-4aef-94a9-d69c8f12a590
+    jinja: 'The following sentence is hard to read because it is grammatically incorrect.
+
+
+      {{sentence}}
+
+
+      A correct version of the same sentence could be:
+
+
+      |||
+
+
+      {{ corrections | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v1
+    reference: Correcting the grammar
+  e9d603d4-cf1a-404e-8d82-cd29a04e82d3: !Template
+    answer_choices: null
+    id: e9d603d4-cf1a-404e-8d82-cd29a04e82d3
+    jinja: 'As an English teacher, I find it hard to read sentences with grammatical
+      errors like the following:
+
+
+      {{sentence}}
+
+
+      A possible way of correcting the above sentence is:
+
+
+      |||
+
+
+      {{ corrections | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: template_v6
+    reference: ''
diff --git a/promptsource/templates/jigsaw_unintended_bias/templates.yaml b/promptsource/templates/jigsaw_unintended_bias/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1a2e9ec40c12871cbdcf963a24af6a4651b0a27
--- /dev/null
+++ b/promptsource/templates/jigsaw_unintended_bias/templates.yaml
@@ -0,0 +1,189 @@
+dataset: jigsaw_unintended_bias
+templates:
+  17f730bd-fc7e-419c-84cc-e5c795268d02: !Template
+    answer_choices: null
+    id: 17f730bd-fc7e-419c-84cc-e5c795268d02
+    jinja: 'Online comments can sometimes be toxic. For instance, they might be insulting,
+      threatening, or racist, among other issues. If 0 corresponds to a very civil comment,
+      and 100 to a highly toxic one, how would you grade the following comment?
+
+      "{{comment_text}}"
+
+      |||
+
+      {{(target * 100.0) | round(0) }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - Other
+      original_task: true
+    name: online_comment
+    reference: ''
+  1add6d25-346e-4bf5-aa58-8dc184018f9e: !Template
+    answer_choices: keep ||| remove
+    id: 1add6d25-346e-4bf5-aa58-8dc184018f9e
+    jinja: 'You are the moderator of the comments section on a website. You want to
+      remove uncivil comments. Would you {{answer_choices[1]}} or {{answer_choices[0]}}
+      the following comment?
+
+      "{{comment_text}}"
+
+      |||
+
+      {{answer_choices[target | round | int]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Other
+      original_task: true
+    name: moderator
+    reference: ''
+  355aa574-61c2-4b94-9f1a-9347fd7c6444: !Template
+    answer_choices: null
+    id: 355aa574-61c2-4b94-9f1a-9347fd7c6444
+    jinja: "{{toxicity_annotator_count}} annotators where tasked with assessing the\
+      \ toxicity of the following comment:\n\n\"{{comment_text}}\"\n\nOn a scale of\
+      \ 0 to 1, what do you think their average prediction was?\n\n||| \n\n{{ target\
+      \ | round(1) }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - Other
+      original_task: true
+    name: annotators_rated
+    reference: ''
+  3f5fe7e8-0808-4792-94e5-f5d8fff7c89d: !Template
+    answer_choices: null
+    id: 3f5fe7e8-0808-4792-94e5-f5d8fff7c89d
+    jinja: How toxic is this comment on a scale from 0 to 5? "{{comment_text}}"
+      ||| {{(target * 5.0) | round(1) }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - Other
+      original_task: true
+    name: modified_scale
+    reference: ''
+  48ef9835-a99b-45f5-8ff4-96b3b2422855: !Template
+    answer_choices: null
+    id: 48ef9835-a99b-45f5-8ff4-96b3b2422855
+    jinja: 'You were hired to evaluate the toxicity of online comments. You find the
+      following comment:
+
+      "{{comment_text}}"
+
+      How would you grade its toxicity from 0 to 1?
+
+      |||
+
+      {{target | round(1) }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - Other
+      original_task: true
+    name: hired_to_review
+    reference: ''
+  5e06b59c-9888-47b1-a982-b90bced2d6a2: !Template
+    answer_choices: civil ||| toxic
+    id: 5e06b59c-9888-47b1-a982-b90bced2d6a2
+    jinja: 'You are browsing the comment section of a website. You read the following
+      comment:
+
+      "{{comment_text}}"
+
+      Would you classify that comment as {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      |||
+
+      {{answer_choices[target | round | int]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Other
+      original_task: true
+    name: browsing
+    reference: ''
+  68962b6a-2ed3-48d5-ba7c-9f023e314592: !Template
+    answer_choices: civil ||| toxic
+    id: 68962b6a-2ed3-48d5-ba7c-9f023e314592
+    jinja: "Someone posted the following comment online:\n\n\"{{comment_text}}\"\n\
+      \nWould you classify it as {{answer_choices[0]}} or {{answer_choices[1]}}?\n\
+      \n||| \n\n{{answer_choices[target | round | int]}}\n\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Other
+      original_task: true
+    name: binary_pred
+    reference: ''
+  71508d0b-7fd9-4810-933c-c3c59f0c5328: !Template
+    answer_choices: null
+    id: 71508d0b-7fd9-4810-933c-c3c59f0c5328
+    jinja: 'On a scale of 0 to 1, how threatening is the comment below?
+
+      {{comment_text}}
+
+      |||
+
+      {{threat | round(1)}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - Other
+      original_task: null
+    name: threatening
+    reference: ''
+  71508d0b-7fd9-4810-933c-c3c59f0c5329: !Template
+    answer_choices: no ||| yes
+    id: 71508d0b-7fd9-4810-933c-c3c59f0c5329
+    jinja: '"{{comment_text}}" Someone posted this comment online. Do you find it
+      toxic?
+
+      |||
+
+      {{answer_choices[target | round | int]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics:
+      - Other
+      original_task: true
+    name: yes_or_no
+    reference: ''
+  71508d1b-7fd9-4810-933c-c3c59f0c5329: !Template
+    answer_choices: no ||| yes
+    id: 71508d1b-7fd9-4810-933c-c3c59f0c5329
+    jinja: '"{{comment_text}}"
+
+      Should this online comment be removed for its toxicity? {{answer_choices[1]}}
+      or {{answer_choices[0]}}?
+
+      |||
+
+      {{answer_choices[target | round | int]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Other
+      original_task: true
+    name: yes_or_no_remove
+    reference: ''
+  a9cf1274-4167-4647-8058-986b4eeff1a0: !Template
+    answer_choices: null
+    id: a9cf1274-4167-4647-8058-986b4eeff1a0
+    jinja: 'How toxic is the comment below on a scale of 0 to 1?
+
+
+      "{{comment_text}}"
+
+
+      |||
+
+      {{target | round(1)}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: how_toxic
+    reference: ''
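The binary templates in this file resolve `{{answer_choices[target | round | int]}}` by rounding the fractional toxicity score to 0 or 1 and indexing into the parsed choices. Below is a minimal sketch of that mechanism, assuming (as promptsource does) that the `answer_choices` field is split on ` ||| ` before rendering; the `target` value is a toy stand-in, not dataset output:

```python
from jinja2 import Environment

env = Environment()
template = env.from_string(
    "Would you classify that comment as {{ answer_choices[0] }} or "
    "{{ answer_choices[1] }}? ||| {{ answer_choices[target | round | int] }}"
)

# "civil ||| toxic" split on " ||| ", mirroring how promptsource parses the field.
choices = "civil ||| toxic".split(" ||| ")
print(template.render(answer_choices=choices, target=0.8))
# -> Would you classify that comment as civil or toxic? ||| toxic
```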
diff --git a/promptsource/templates/kelm/templates.yaml b/promptsource/templates/kelm/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d8a5214e8c4acb9f0a0945f796d538e0444b0115
--- /dev/null
+++ b/promptsource/templates/kelm/templates.yaml
@@ -0,0 +1,83 @@
+dataset: kelm
+templates:
+  26acfe5b-f295-4b84-b643-45e05a17d286: !Template
+    answer_choices: null
+    id: 26acfe5b-f295-4b84-b643-45e05a17d286
+    jinja: "Given a natural language sentence expressing facts of the form (subject,\
+      \ relation, object), generate structured triples expressing all relevant relations\
+      \ \n\n{{sentence}} |||\n{{triple}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generated Sentence to KB Triple Paraphrase 1
+    reference: Given a sentence, generate triples of the form (subject, relation,
+      object)
+  3381175a-b93e-4d1e-a7f2-428c5d2c7c2b: !Template
+    answer_choices: null
+    id: 3381175a-b93e-4d1e-a7f2-428c5d2c7c2b
+    jinja: 'Given facts from a knowledge base encoded in the form (subject, relation,
+      object), generate a natural language sentence that uses all facts provided as
+      input.
+
+
+      {{ triple }} |||
+
+      {{ sentence }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: KB Triple to Generated Sentence Paraphrase 1
+    reference: Convert a KB triple of the form (subject, relation, object) to a natural
+      language sentence
+  4d674e43-c569-4f0c-9b5c-436f430da92a: !Template
+    answer_choices: null
+    id: 4d674e43-c569-4f0c-9b5c-436f430da92a
+    jinja: 'Given a sentence, generate triples of the form (subject, relation, object):
+
+
+      {{sentence}} |||
+
+      {{triple}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generated Sentence to KB Triple
+    reference: Given a sentence, generate a string of the form (subject, relation,
+      object)
+  55909592-633d-4cef-97ff-058c86eea28f: !Template
+    answer_choices: null
+    id: 55909592-633d-4cef-97ff-058c86eea28f
+    jinja: 'Given facts of the form (subject, relation, object), create a natural
+      language sentence that uses all of the facts provided as input:
+
+
+      "{{ triple }}" |||
+
+      {{ sentence }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: KB Triple to Generated Sentence Paraphrase 2
+    reference: Convert a KB triple of the form (subject, relation, object) to a natural
+      language sentence
+  7478edee-5950-4ca2-8878-9c5a98925952: !Template
+    answer_choices: null
+    id: 7478edee-5950-4ca2-8878-9c5a98925952
+    jinja: 'Given KB triples of the form (subject, relation, object), generate a natural
+      language sentence:
+
+
+      {{ triple }} |||
+
+      {{ sentence }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: KB Triple to Generated Sentence
+    reference: Convert a KB triple of the form (subject, relation, object) to a natural
+      language sentence
diff --git a/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml b/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..437291801fc11fda627d9355754f4f8500b16a48
--- /dev/null
+++ b/promptsource/templates/kilt_tasks/hotpotqa/templates.yaml
@@ -0,0 +1,104 @@
+dataset: kilt_tasks
+subset: hotpotqa
+templates:
+  1a123f3a-0507-41b9-904f-b18d9ce2b79e: !Template
+    answer_choices: null
+    id: 1a123f3a-0507-41b9-904f-b18d9ce2b79e
+    jinja: '{% if output %}
+
+      Here''s a complex question that requires someone to reason about the input;
+      can you answer it?
+
+      {{input}}
+
+      |||
+
+      {{output | map(attribute="answer") | list | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: complex_question
+    reference: ''
+  5531ce47-35ff-4bce-943d-5b2b86c44352: !Template
+    answer_choices: null
+    id: 5531ce47-35ff-4bce-943d-5b2b86c44352
+    jinja: '{% if output %}
+
+      Combine facts and answer this: {{input}}
+
+      |||
+
+      {{output | map(attribute="answer") | list | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: combining_facts
+    reference: ''
+  5ce9d659-4df8-4afd-a6e1-3e542df0035a: !Template
+    answer_choices: null
+    id: 5ce9d659-4df8-4afd-a6e1-3e542df0035a
+    jinja: '{% if output %}
+
+      Formulate an answer to this elaborate question: {{input}}
+
+      |||
+
+      {{output | map(attribute="answer") | list | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: formulate
+    reference: ''
+  9211f663-51f9-428e-ba27-158480eee083: !Template
+    answer_choices: null
+    id: 9211f663-51f9-428e-ba27-158480eee083
+    jinja: '{% if output %}
+
+      FINAL EXAM
+
+
+      Question 1. {{input}}
+
+      |||
+
+      {{output | map(attribute="answer") | list | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: final_exam
+    reference: ''
+  ac0545a1-9363-4c17-aada-f0eedf5a24b2: !Template
+    answer_choices: null
+    id: ac0545a1-9363-4c17-aada-f0eedf5a24b2
+    jinja: '{% if output %}
+
+      {{input}}
+
+      |||
+
+      {{output | map(attribute="answer") | list | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: straightforward_qa
+    reference: ''
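Every template in this hotpotqa file funnels the answer list through the same `output | map(attribute="answer") | list | choice` pipeline. Note that `choice` is not a Jinja2 builtin; promptsource registers a custom random-choice filter. A sketch under that assumption, with toy `output` records:

```python
import random

from jinja2 import Environment

env = Environment()
# Assumption: mirrors the custom `choice` filter promptsource registers.
env.filters["choice"] = random.choice

template = env.from_string(
    '{% if output %}{{ output | map(attribute="answer") | list | choice }}{% endif %}'
)

# Toy stand-ins for the dataset's list of answer records.
output = [{"answer": "Radio City"}, {"answer": "Radio City (Indian radio station)"}]
print(template.render(output=output))  # prints one of the two answers at random
```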
diff --git a/promptsource/templates/kilt_tasks/nq/templates.yaml b/promptsource/templates/kilt_tasks/nq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f5c364639578565303dd179079bf41f05d64721e
--- /dev/null
+++ b/promptsource/templates/kilt_tasks/nq/templates.yaml
@@ -0,0 +1,163 @@
+dataset: kilt_tasks
+subset: nq
+templates:
+  294fd8f3-c7e0-4b3c-abd3-64527f8f71b1: !Template
+    answer_choices: null
+    id: 294fd8f3-c7e0-4b3c-abd3-64527f8f71b1
+    jinja: '{% if output %}
+
+      The goal is to predict an English answer string for an input English question.
+      All questions can be answered using the contents of English Wikipedia.
+
+      Question: {{input}}
+
+      Answer:
+
+      |||
+
+      {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+      }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: formal_description
+    reference: Copied from the nq_open dataset description.
+  44f247e1-9d7e-43b9-af4b-6202fd16d0c0: !Template
+    answer_choices: null
+    id: 44f247e1-9d7e-43b9-af4b-6202fd16d0c0
+    jinja: '{% if output %}
+
+      Search query: {{input}}
+
+      Response:
+
+      |||
+
+      {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+      }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: search query
+    reference: ''
+  485046dc-0835-4b42-b330-e0ca2ee7f7a1: !Template
+    answer_choices: null
+    id: 485046dc-0835-4b42-b330-e0ca2ee7f7a1
+    jinja: '{% if output %}
+
+      Question : {{input}}
+
+      Answer :
+
+      |||
+
+      {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|join('',
+      '') }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: false
+    name: question_with_multiple_answers
+    reference: Plain question with multiple answers
+  a5e73119-b3d5-408f-a954-56951ea070f3: !Template
+    answer_choices: null
+    id: a5e73119-b3d5-408f-a954-56951ea070f3
+    jinja: '{% if output %}
+
+      Guess a question that has the answer "{{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto",
+      "")|list|choice }}"
+
+      |||
+
+      {{input}}?
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: guess_question
+    reference: Guess a question. It will show whether the model can identify the entity in the question.
+  d5fabd3e-4d2e-45bc-888a-0f3a7ea48c85: !Template
+    answer_choices: null
+    id: d5fabd3e-4d2e-45bc-888a-0f3a7ea48c85
+    jinja: '{% if output %}
+
+      Question : {{input}}
+
+      Answer :
+
+      |||
+
+      {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+      }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question_answer
+    reference: Plain Question
+  e1ae7863-b30e-4a1a-8c4f-74b8baee5ba9: !Template
+    answer_choices: null
+    id: e1ae7863-b30e-4a1a-8c4f-74b8baee5ba9
+    jinja: '{% if output %}
+
+      I''ve always wondered: {{input}}
+
+      |||
+
+      {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+      }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: first_person_context
+    reference: Ask a question in the first person
+  f63f6b1e-ef1f-4cd8-bb6e-aaf24fed8936: !Template
+    answer_choices: null
+    id: f63f6b1e-ef1f-4cd8-bb6e-aaf24fed8936
+    jinja: '{% if output %}
+
+      Answer the following question.
+
+      {{input}}
+
+      |||
+
+      {{output|selectattr("answer")|map(attribute=''answer'')|reject("equalto", "")|list|choice
+      }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question_with_instruction
+    reference: Instruction before question.
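The nq templates extend the hotpotqa pipeline with filtering: `selectattr("answer")` keeps only records whose answer field is truthy, `reject("equalto", "")` drops empty strings, and `choice` (again assumed to be promptsource's custom random-choice filter) samples one. A sketch with toy data:

```python
import random

from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice  # assumption, as above

template = env.from_string(
    "{{ output | selectattr('answer') | map(attribute='answer')"
    " | reject('equalto', '') | list | choice }}"
)

# Toy records: empty and missing answers are filtered out before sampling.
output = [{"answer": "1998"}, {"answer": ""}, {"answer": None}]
print(template.render(output=output))  # -> 1998
```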
diff --git a/promptsource/templates/lambada/templates.yaml b/promptsource/templates/lambada/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e12c6abf656df991a166eeaca7c24d759c27c711
--- /dev/null
+++ b/promptsource/templates/lambada/templates.yaml
@@ -0,0 +1,72 @@
+dataset: lambada
+templates:
+  3747e80a-4182-44eb-944b-dee40095bb17: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 3747e80a-4182-44eb-944b-dee40095bb17
+    jinja: 'Please predict the next word after the following chunk of text.
+
+
+      {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: please next word
+    reference: ''
+  506765b8-17c0-4946-bbb0-b28288caacb3: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 506765b8-17c0-4946-bbb0-b28288caacb3
+    jinja: '{{ text.split()[:-1] | join('' '') }} ____.
+
+
+      Fill in the ____: ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the ____
+    reference: ''
+  948664d5-2ea2-4245-b656-9283948dd5cd: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: 948664d5-2ea2-4245-b656-9283948dd5cd
+    jinja: '{{ text.split()[:-1] | join('' '') }}...
+
+
+      What comes after the ellipses? ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: ellipses
+    reference: ''
+  acfe374c-60ce-4354-b285-e7b0717cffe5: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: acfe374c-60ce-4354-b285-e7b0717cffe5
+    jinja: 'This story got cut short. What comes next?
+
+
+      {{ text.split()[:-1] | join('' '') }} ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: what comes next
+    reference: ''
+  d5707bd9-d3cc-4535-b4c1-5c2aee8cb8c7: !Template
+    answer_choices: '{{ text.split()[:-1] | unique | join('' ||| '') }}'
+    id: d5707bd9-d3cc-4535-b4c1-5c2aee8cb8c7
+    jinja: 'Fill in the blank:
+
+
+      {{ text.split()[:-1] | join('' '') }} ____. ||| {{ text.split()[-1] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: Brown et al.
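All five lambada templates share one trick: `text.split()[:-1] | join(' ')` is the passage minus its final word, `text.split()[-1]` is the target word, and the `answer_choices` expression deduplicates the remaining words with `unique`. A minimal sketch on a toy passage:

```python
from jinja2 import Environment

env = Environment()
template = env.from_string(
    "{{ text.split()[:-1] | join(' ') }} ||| {{ text.split()[-1] }}"
)
print(template.render(text="the cat sat on the mat"))
# -> the cat sat on the ||| mat
```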
diff --git a/promptsource/templates/liar/templates.yaml b/promptsource/templates/liar/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9836f89c577c557f73c7a026babe400f81c0d0bd
--- /dev/null
+++ b/promptsource/templates/liar/templates.yaml
@@ -0,0 +1,119 @@
+dataset: liar
+templates:
+  09b28c40-0029-4fed-bb1e-ad16b8e55cd8: !Template
+    answer_choices: null
+    id: 09b28c40-0029-4fed-bb1e-ad16b8e55cd8
+    jinja: '{% if job_title != "" %}
+
+      Given that the job title of the speaker of the statement below is {{job_title}},
+      guess the speaker:
+
+
+      {{statement}} |||
+
+      {{speaker}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_6
+    reference: ''
+  25af7532-2351-4883-843c-fd5e28ab8668: !Template
+    answer_choices: null
+    id: 25af7532-2351-4883-843c-fd5e28ab8668
+    jinja: 'What is the party affiliation of the speaker of the following statement?
+
+
+      {{statement}} |||
+
+
+      {{party_affiliation}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_2
+    reference: ''
+  a22bff2e-ddb8-4c2c-b7a7-fa6847648fd5: !Template
+    answer_choices: null
+    id: a22bff2e-ddb8-4c2c-b7a7-fa6847648fd5
+    jinja: '{% if job_title != "" %}
+
+      Given the following statement:
+
+
+      {{statement}}
+
+
+      and the following speaker: {{speaker}}, predict the speaker''s job title. |||
+
+      {{job_title}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_4
+    reference: ''
+  b3618fb1-ad56-47bc-b6ff-393b9c24992b: !Template
+    answer_choices: null
+    id: b3618fb1-ad56-47bc-b6ff-393b9c24992b
+    jinja: '{% if job_title != "" %}
+
+      Guess the context in which the statement below was made by speaker {{speaker}},
+      who is a {{job_title}}:
+
+
+      {{statement}} |||
+
+      {{context}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_5
+    reference: ''
+  d153b4d3-d0a2-4768-854a-582440943c64: !Template
+    answer_choices: false ||| half-true ||| mostly-true ||| true ||| barely-true |||
+      pants-fire
+    id: d153b4d3-d0a2-4768-854a-582440943c64
+    jinja: 'Given the statement below:
+
+
+      {{statement}}
+
+
+      In which of the following categories {{"false", "half-true", "mostly-true",
+      "true", "barely-true", "pants-fire"}} would you label it based on its truthfulness?
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_1
+    reference: ''
+  f61239e3-06fc-4397-93ce-f3fca7340428: !Template
+    answer_choices: null
+    id: f61239e3-06fc-4397-93ce-f3fca7340428
+    jinja: 'Given the following statement:
+
+
+      {{statement}}
+
+
+      Under what subject would you categorize it? |||
+
+      {{subject.split(",")[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_3
+    reference: ''
diff --git a/promptsource/templates/limit/templates.yaml b/promptsource/templates/limit/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..27f846ce77e77d663a49de4dc40b372b2d9f6647
--- /dev/null
+++ b/promptsource/templates/limit/templates.yaml
@@ -0,0 +1,189 @@
+dataset: limit
+templates:
+  0898caf1-f3e4-493f-a838-84a678176c14: !Template
+    answer_choices: null
+    id: 0898caf1-f3e4-493f-a838-84a678176c14
+    jinja: '{{sentence}}
+
+
+      What is the last entity in motion mentioned in the sentence, if any?
+
+      |||
+
+      {% if (motion_entities  | length) > 0 %}
+
+      {{ (motion_entities | sort(attribute="start_index") | last)["entity"] }}
+
+      {% else %}
+
+      {{"No entity in motion"}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: last_moving_entity
+    reference: ''
+  3b88c578-db77-4fd0-ad50-c78a39197ce5: !Template
+    answer_choices: null
+    id: 3b88c578-db77-4fd0-ad50-c78a39197ce5
+    jinja: '{{sentence}}
+
+
+      Are there any entities in motion in the sentence?
+
+
+      |||
+
+
+      {{motion}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: any_entity
+    reference: Asking if there is any entity in motion in the text
+  3f1689a9-b255-4d8d-b780-062ca2f83596: !Template
+    answer_choices: null
+    id: 3f1689a9-b255-4d8d-b780-062ca2f83596
+    jinja: "{{sentence}}\n\nWhat are the entities in motion in the previous sentence?\
+      \ Return {{\"'No entity'\"}} if you can't find any. \n\n|||\n{% if (motion_entities\
+      \  | length) == 0 %}\n{{ \"No entity\" }}\n{% else %}\n{{motion_entities | map(attribute=\"\
+      entity\") | join(\", \")}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: find_entities_question
+    reference: ''
+  74c9962e-3ec2-4f06-ace4-fcac6f506076: !Template
+    answer_choices: null
+    id: 74c9962e-3ec2-4f06-ace4-fcac6f506076
+    jinja: 'Extract: {{sentence}}
+
+
+      Is there more than one mention of a moving entity in the extract?
+
+
+      |||
+
+      {% if (motion_entities  | length) > 1 %}
+
+      {{ "Yes" }}
+
+      {% else %}
+
+      {{ "No" }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: more_than_one
+    reference: ''
+  766ab346-6fa6-4496-915f-65e7b06ab8ac: !Template
+    answer_choices: null
+    id: 766ab346-6fa6-4496-915f-65e7b06ab8ac
+    jinja: '{{sentence}}
+
+      How many moving entities are mentioned in the sentence?
+
+      |||
+
+      {{motion_entities | length}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: count_entities
+    reference: ''
+  957deab1-7570-4cbf-a31f-55bfad5212a7: !Template
+    answer_choices: null
+    id: 957deab1-7570-4cbf-a31f-55bfad5212a7
+    jinja: "Name the entities in motion in the following sentence. Respond {{\"'No\
+      \ entity'\"}} if you can't find any. \n\n{{sentence}}\n\n|||\n\n{% if (motion_entities\
+      \ | length) == 0 %}\n{{\"No entity\"}}\n{% else %}\n{{motion_entities | map(attribute=\"\
+      entity\") | join(\", \")}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: find_entities_affirm
+    reference: ''
+  af2203ba-d176-4981-82bd-088ef0c39214: !Template
+    answer_choices: null
+    id: af2203ba-d176-4981-82bd-088ef0c39214
+    jinja: '{{sentence}}
+
+
+      Name the first entity in motion mentioned in the sentence, if any.
+
+
+      |||
+
+
+      {% if (motion_entities  | length) > 0 %}
+
+      {{ (motion_entities | sort(attribute="start_index") | first)["entity"] }}
+
+      {% else %}
+
+      {{"No entity in motion"}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: first_moving_entity
+    reference: ''
+  b847d63c-0b52-4b6e-a62f-12e47439ce54: !Template
+    answer_choices: null
+    id: b847d63c-0b52-4b6e-a62f-12e47439ce54
+    jinja: 'Count the number of moving entities in the following sentence.
+
+      {{sentence}}
+
+      |||
+
+      {{motion_entities | length}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: count_entities_affirm
+    reference: ''
+  e5482b0d-ed6e-44de-a6e9-b64cdd1e2013: !Template
+    answer_choices: null
+    id: e5482b0d-ed6e-44de-a6e9-b64cdd1e2013
+    jinja: 'Is there any reference to movement in the following sentence?
+
+
+      {{sentence}}
+
+
+      |||
+
+
+      {{motion}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: any_entity_indirect
+    reference: Indirectly asking whether there are moving entities
+  e8fca13b-7063-4ebc-9a4d-c124398cacf4: !Template
+    answer_choices: null
+    id: e8fca13b-7063-4ebc-9a4d-c124398cacf4
+    jinja: "Extract: {{sentence}}\n\nCan you find all mentions of moving entities\
+      \ in the extract? Return {{\"'No entity'\"}} if you can't find any. \n\n|||\n\
+      {% if (motion_entities  | length) == 0 %}\n{{ \"No entity\" }}\n{% else %}\n\
+      {{motion_entities | map(attribute=\"entity\") | join(\", \")}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: find_entities_extract
+    reference: ''
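The first/last-entity templates above sort `motion_entities` by `start_index` and then take `first` or `last`, falling back to a fixed string when the list is empty. A sketch of the `last` case with hypothetical entity records:

```python
from jinja2 import Environment

env = Environment()
template = env.from_string(
    "{% if motion_entities | length > 0 %}"
    '{{ (motion_entities | sort(attribute="start_index") | last)["entity"] }}'
    "{% else %}No entity in motion{% endif %}"
)

# Hypothetical stand-ins for the dataset's motion_entities feature.
entities = [
    {"entity": "a ball", "start_index": 4},
    {"entity": "the dog", "start_index": 21},
]
print(template.render(motion_entities=entities))  # -> the dog
print(template.render(motion_entities=[]))        # -> No entity in motion
```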
diff --git a/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bfd7e11cb965b17b42214e040a91c21d0522368f
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_1d/templates.yaml
@@ -0,0 +1,107 @@
+dataset: math_dataset
+subset: algebra__linear_1d
+templates:
+  10a6ab6c-51f1-45cf-9176-54764bb6b612: !Template
+    answer_choices: null
+    id: 10a6ab6c-51f1-45cf-9176-54764bb6b612
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after
+    reference: Simple question after expression
+  1e769483-a2e1-4829-8bf4-72a160477093: !Template
+    answer_choices: null
+    id: 1e769483-a2e1-4829-8bf4-72a160477093
+    jinja: '{{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question_extracting_variable_name
+    reference: Provide solution with no question extracting the variable name
+  674b8811-faaf-45cf-ae5d-bdd40050273c: !Template
+    answer_choices: null
+    id: 674b8811-faaf-45cf-ae5d-bdd40050273c
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after_extracting_variable_name
+    reference: Simple question after expression extracting variable name
+  77893b3c-d16e-4a6d-a171-aa21697b8bb7: !Template
+    answer_choices: null
+    id: 77893b3c-d16e-4a6d-a171-aa21697b8bb7
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before_extracting_variable_name
+    reference: Simple question before expression extracting variable name
+  99f9f0d5-7a36-4b14-b80c-2540e7c8d3f4: !Template
+    answer_choices: null
+    id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f4
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before
+    reference: Simple question before expression
+  e4f26ee0-c02c-4355-a242-c2b213b8761b: !Template
+    answer_choices: null
+    id: e4f26ee0-c02c-4355-a242-c2b213b8761b
+    jinja: '{{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question
+    reference: Provide solution with no question
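Several templates in the math_dataset files reuse `{% set variable_name = question[-2] %}`: because every question ends in "... for <var>.", the second-to-last character is the single-letter variable being solved for. A sketch on a made-up question string in that format:

```python
from jinja2 import Environment

env = Environment()
template = env.from_string(
    "{{ question }}\n{% set variable_name = question[-2] %}{{ variable_name }}="
)

# Hypothetical example following the dataset's question format.
print(template.render(question="Solve 24 = 1601*c - 1605*c for c."))
# Solve 24 = 1601*c - 1605*c for c.
# c=
```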
diff --git a/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e270e37dd9c35325026922988b9b15f0be73123
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_1d_composed/templates.yaml
@@ -0,0 +1,107 @@
+dataset: math_dataset
+subset: algebra__linear_1d_composed
+templates:
+  10a6ab6c-51f1-45cf-9176-54764bb6b613: !Template
+    answer_choices: null
+    id: 10a6ab6c-51f1-45cf-9176-54764bb6b613
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after
+    reference: Simple question after expression
+  1e769483-a2e1-4829-8bf4-72a160477094: !Template
+    answer_choices: null
+    id: 1e769483-a2e1-4829-8bf4-72a160477094
+    jinja: '{{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question_extracting_variable_name
+    reference: Provide solution with no question extracting the variable name
+  77893b3c-d16e-4a6d-a171-aa21697b8bb8: !Template
+    answer_choices: null
+    id: 77893b3c-d16e-4a6d-a171-aa21697b8bb8
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before_extracting_variable_name
+    reference: Simple question before expression extracting variable name
+  7ac3e795-8b2e-4391-984f-e57ed2d1e3fc: !Template
+    answer_choices: null
+    id: 7ac3e795-8b2e-4391-984f-e57ed2d1e3fc
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after_extracting_variable_name
+    reference: Simple question after expression extracting variable name
+  99f9f0d5-7a36-4b14-b80c-2540e7c8d3f5: !Template
+    answer_choices: null
+    id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f5
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before
+    reference: Simple question before expression
+  e4f26ee0-c02c-4355-a242-c2b213b8761c: !Template
+    answer_choices: null
+    id: e4f26ee0-c02c-4355-a242-c2b213b8761c
+    jinja: '{{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question
+    reference: Provide solution with no question
diff --git a/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..74fab40eddc5c452a61ae65f4bce4af31b34b15e
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_2d/templates.yaml
@@ -0,0 +1,107 @@
+dataset: math_dataset
+subset: algebra__linear_2d
+templates:
+  10a6ab6c-51f1-45cf-9176-54764bb6b614: !Template
+    answer_choices: null
+    id: 10a6ab6c-51f1-45cf-9176-54764bb6b614
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after
+    reference: Simple question after expression
+  1e769483-a2e1-4829-8bf4-72a160477095: !Template
+    answer_choices: null
+    id: 1e769483-a2e1-4829-8bf4-72a160477095
+    jinja: '{{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question_extracting_variable_name
+    reference: Provide solution with no question extracting the variable name
+  674b8811-faaf-45cf-ae5d-bdd40050273e: !Template
+    answer_choices: null
+    id: 674b8811-faaf-45cf-ae5d-bdd40050273e
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after_extracting_variable_name
+    reference: Simple question after expression extracting variable name
+  77893b3c-d16e-4a6d-a171-aa21697b8bb9: !Template
+    answer_choices: null
+    id: 77893b3c-d16e-4a6d-a171-aa21697b8bb9
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before_extracting_variable_name
+    reference: Simple question before expression extracting variable name
+  99f9f0d5-7a36-4b14-b80c-2540e7c8d3f6: !Template
+    answer_choices: null
+    id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f6
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before
+    reference: Simple question before expression
+  e4f26ee0-c02c-4355-a242-c2b213b8761d: !Template
+    answer_choices: null
+    id: e4f26ee0-c02c-4355-a242-c2b213b8761d
+    jinja: '{{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question
+    reference: Provide solution with no question
diff --git a/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml b/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac5e6134b0daf3ae3295b69879022a4d9a849136
--- /dev/null
+++ b/promptsource/templates/math_dataset/algebra__linear_2d_composed/templates.yaml
@@ -0,0 +1,107 @@
+dataset: math_dataset
+subset: algebra__linear_2d_composed
+templates:
+  10a6ab6c-51f1-45cf-9176-54764bb6b615: !Template
+    answer_choices: null
+    id: 10a6ab6c-51f1-45cf-9176-54764bb6b615
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after
+    reference: Simple question after expression
+  1e769483-a2e1-4829-8bf4-72a160477096: !Template
+    answer_choices: null
+    id: 1e769483-a2e1-4829-8bf4-72a160477096
+    jinja: '{{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question_extracting_variable_name
+    reference: Provide solution with no question extracting the variable name
+  674b8811-faaf-45cf-ae5d-bdd40050273f: !Template
+    answer_choices: null
+    id: 674b8811-faaf-45cf-ae5d-bdd40050273f
+    jinja: '{{question}}
+
+      What is the solution to the previous algebraic expression?
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_after_extracting_variable_name
+    reference: Simple question after expression extracting variable name
+  77893b3c-d16e-4a6d-a171-aa21697b8bc7: !Template
+    answer_choices: null
+    id: 77893b3c-d16e-4a6d-a171-aa21697b8bc7
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      {% set variable_name = question[-2] %}
+
+      {{variable_name}}=
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before_extracting_variable_name
+    reference: Simple question before expression extracting variable name
+  99f9f0d5-7a36-4b14-b80c-2540e7c8d3f7: !Template
+    answer_choices: null
+    id: 99f9f0d5-7a36-4b14-b80c-2540e7c8d3f7
+    jinja: 'What is the solution to the following algebraic expression?
+
+      {{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simple_template_question_before
+    reference: Simple question before expression
+  e4f26ee0-c02c-4355-a242-c2b213b8761e: !Template
+    answer_choices: null
+    id: e4f26ee0-c02c-4355-a242-c2b213b8761e
+    jinja: '{{question}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: no_question
+    reference: Provide solution with no question
diff --git a/promptsource/templates/math_qa/templates.yaml b/promptsource/templates/math_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e711ec9ec69125be9d761760af502a08cee9cef
--- /dev/null
+++ b/promptsource/templates/math_qa/templates.yaml
@@ -0,0 +1,86 @@
+dataset: math_qa
+templates:
+  46195182-c216-4179-a60f-16637a86306d: !Template
+    answer_choices: null
+    id: 46195182-c216-4179-a60f-16637a86306d
+    jinja: 'Given the problem below, what is the {{"annotated formula"}} you would
+      use to solve it?
+
+      ===
+
+      {{Problem}} |||
+
+      {{annotated_formula}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_5
+    reference: ''
+  6312d599-8ca4-4bc8-a76f-81f2e36727bd: !Template
+    answer_choices: null
+    id: 6312d599-8ca4-4bc8-a76f-81f2e36727bd
+    jinja: 'Given the following problem:
+
+      {{Problem}}
+
+      ===
+
+      and the following options, select the correct option:
+
+      {{options}}|||
+
+      {{correct}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_3
+    reference: ''
+  82345ddb-f2e3-46dc-8283-531a3bd000e9: !Template
+    answer_choices: null
+    id: 82345ddb-f2e3-46dc-8283-531a3bd000e9
+    jinja: "Given the problem:\n{{Problem}}\n===\nand the options:\n{{options}}\n\
+      ===\nProvide a rationale for how you would solve the problem.  |||\n{{Rationale}}\
+      \ \n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_1
+    reference: ''
+  a3c2ec72-4af5-42aa-9e8e-ef475fa7c039: !Template
+    answer_choices: null
+    id: a3c2ec72-4af5-42aa-9e8e-ef475fa7c039
+    jinja: 'Given the problem below, under what category would you classify it?
+
+      ===
+
+      {{Problem}} |||
+
+      {{category}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_2
+    reference: ''
+  b4881325-de90-4cf5-8100-d4a2e13aa017: !Template
+    answer_choices: null
+    id: b4881325-de90-4cf5-8100-d4a2e13aa017
+    jinja: 'Given the problem below, what is the {{"linear formula"}} you would use
+      to solve it?
+
+      ===
+
+      {{Problem}} |||
+
+      {{linear_formula}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Template_4
+    reference: ''
diff --git a/promptsource/templates/mc_taco/templates.yaml b/promptsource/templates/mc_taco/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e64da4e49bde1c8a3d36456891dbd2634fadb5fd
--- /dev/null
+++ b/promptsource/templates/mc_taco/templates.yaml
@@ -0,0 +1,225 @@
+dataset: mc_taco
+templates:
+  1b27afce-9748-44bd-9d82-9db4b815c292: !Template
+    answer_choices: No ||| Yes
+    id: 1b27afce-9748-44bd-9d82-9db4b815c292
+    jinja: 'Given the context,
+
+
+      {{sentence}}
+
+
+      observe the following QA pair and check if the answer is plausible:
+
+
+      Question: {{question}}
+
+
+      Answer: {{answer}} |||
+
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: observe_check_plausible_yes_no
+    reference: Basic Context, QA Pair, ask for plausibility
+  38ab730f-1ed8-4362-99e1-c0d305aa056e: !Template
+    answer_choices: plausible ||| implausible
+    id: 38ab730f-1ed8-4362-99e1-c0d305aa056e
+    jinja: "I've been grappling with the temporal accuracy of this answer for a while:\n\
+      \nQ: \"{{question}}\"\n\nI have the following information: \"{{sentence}}\"\n\
+      \nA: \"{{answer}}\" \n\nThis answer is definitely not ||| {{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: plausible_negated
+    reference: Context, QA Pair, Negation
+  5bc98cb7-350e-471e-b986-ad52a46f403c: !Template
+    answer_choices: Event Duration ||| Event Ordering ||| Frequency ||| Typical Time
+      ||| Stationarity
+    id: 5bc98cb7-350e-471e-b986-ad52a46f403c
+    jinja: 'There are five temporal categories: {{"Event Duration"}}, {{"Event Ordering"}},
+      {{"Frequency"}}, {{"Typical Time"}}, {{"Stationarity"}}.
+
+
+      Out of the above temporal categories, which one does the question "{{question}}"
+      belong to? |||
+
+      {{answer_choices[category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: temporal_categories_with_choices
+    reference: Temporal categories as part of the prompt
+  5e5cedef-b943-439a-a75a-1140478b0620: !Template
+    answer_choices: null
+    id: 5e5cedef-b943-439a-a75a-1140478b0620
+    jinja: '{% if label %}
+
+      I have the following passage:
+
+
+      {{sentence}}
+
+
+      My query is: "{{question}}"
+
+
+      I want an answer that is "temporally plausible". |||
+
+
+      {{answer}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_answer_from_question_and_context
+    reference: Generate answer from question+context (if plausible)
+  8423a3fa-adcf-4d36-b639-774bd13ac3fe: !Template
+    answer_choices: No ||| Yes
+    id: 8423a3fa-adcf-4d36-b639-774bd13ac3fe
+    jinja: 'Here''s what happened: {{sentence}}
+
+
+      I asked my friend {{question}}
+
+
+      and they said {{answer}}
+
+
+      Should I believe them?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: asked_my_friend
+    reference: ''
+  89aaa7f3-d409-4a27-acd5-a207b431b12c: !Template
+    answer_choices: No ||| Yes
+    id: 89aaa7f3-d409-4a27-acd5-a207b431b12c
+    jinja: 'Given the context, the question, and the candidate answer, the task is
+      to determine whether the candidate answer is plausible ("yes") or not ("no").
+
+
+      Context: {{sentence}}
+
+
+      Question: {{question}}
+
+
+      Candidate answer: {{answer}}
+
+      |||
+
+      {{answer_choices[label]}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: formal_description
+    reference: Taken from the description of the dataset.
+  a2896c7d-d443-4a3a-847c-9896a166a4b5: !Template
+    answer_choices: No ||| Yes
+    id: a2896c7d-d443-4a3a-847c-9896a166a4b5
+    jinja: 'Given the context,
+
+
+      {{sentence}}
+
+
+      and the question,
+
+
+      {{question}}
+
+
+      is the following answer believable?
+
+
+      {{answer}} |||
+
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: believable
+    reference: ''
+  b08c4c20-f8a2-4bdb-8a9b-235f782c7386: !Template
+    answer_choices: False ||| True
+    id: b08c4c20-f8a2-4bdb-8a9b-235f782c7386
+    jinja: 'True/False?
+
+
+      "{{answer}}" is a plausible answer to "{{question}}", given "{{sentence}}" |||
+
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: plausible_true_false
+    reference: Context, QA Pair, T/F question
+  df9ad236-1385-48ea-b056-171aa3f8d0bd: !Template
+    answer_choices: Event Duration ||| Event Ordering ||| Frequency ||| Typical Time
+      ||| Stationarity
+    id: df9ad236-1385-48ea-b056-171aa3f8d0bd
+    jinja: 'Which temporal category does the question "{{question}}" belong to? |||
+
+
+      {{answer_choices[category]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: temporal_categories_no_choices
+    reference: Question provided, predict Temporal Category
+  fb4f8f70-c1cc-4004-97a5-cd131259d318: !Template
+    answer_choices: Yes ||| No
+    id: fb4f8f70-c1cc-4004-97a5-cd131259d318
+    jinja: 'Here''s what happened: {{sentence}}
+
+
+      I asked my friend {{question}}
+
+
+      and they said {{answer}}
+
+
+      Should I doubt them?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: asked_my_friend_doubt
+    reference: ''
diff --git a/promptsource/templates/mdd/task1_qa/templates.yaml b/promptsource/templates/mdd/task1_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bfcf4c384c3b0029182d82cb72998445db69bb36
--- /dev/null
+++ b/promptsource/templates/mdd/task1_qa/templates.yaml
@@ -0,0 +1,24 @@
+dataset: mdd
+subset: task1_qa
+templates:
+  59b9d82e-b778-429c-a45c-a27d6abdf13a: !Template
+    answer_choices: null
+    id: 59b9d82e-b778-429c-a45c-a27d6abdf13a
+    jinja: '{{dialogue_turns.utterance[0]}}|||{{dialogue_turns.utterance[1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: question_answering
+    reference: Given a question, return the answer.
+  bedf40a1-630a-4aae-ad2f-cfc90f77fb9f: !Template
+    answer_choices: null
+    id: bedf40a1-630a-4aae-ad2f-cfc90f77fb9f
+    jinja: 'Generate a movie-trivia question for this answer: {{ dialogue_turns.utterance[1]
+      }}|||{{ dialogue_turns.utterance[0] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_question
+    reference: Given the answer, generate a question related to it.
diff --git a/promptsource/templates/mdd/task2_recs/templates.yaml b/promptsource/templates/mdd/task2_recs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..972fcb00dc0117f3d480268487392c79ffdc2713
--- /dev/null
+++ b/promptsource/templates/mdd/task2_recs/templates.yaml
@@ -0,0 +1,13 @@
+dataset: mdd
+subset: task2_recs
+templates:
+  6f0eb61c-d9f9-4e52-a317-3d7b8049eb9b: !Template
+    answer_choices: null
+    id: 6f0eb61c-d9f9-4e52-a317-3d7b8049eb9b
+    jinja: '{{dialogue_turns.utterance[0]}}|||{{dialogue_turns.utterance[1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: recommend_movies
+    reference: Given the user's likes, recommend a movie.
diff --git a/promptsource/templates/mdd/task3_qarecs/templates.yaml b/promptsource/templates/mdd/task3_qarecs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b5a2be1b2b6c4740122c9a3f23d36449fb508da0
--- /dev/null
+++ b/promptsource/templates/mdd/task3_qarecs/templates.yaml
@@ -0,0 +1,260 @@
+dataset: mdd
+subset: task3_qarecs
+templates:
+  1614890b-362c-4ee8-850d-841cf511d169: !Template
+    answer_choices: null
+    id: 1614890b-362c-4ee8-850d-841cf511d169
+    jinja: '{% if dialogue_turns.utterance|length==6%}
+
+      This is a film-related dialogue between Speaker 1 and Speaker 2. Complete Speaker
+      2''s intervention to answer Speaker 1''s question.
+
+
+      Speaker 1: {{dialogue_turns.utterance[0]}}
+
+
+      Speaker 2: {{dialogue_turns.utterance[1]}}
+
+
+      Speaker 1: {{dialogue_turns.utterance[2]}}
+
+
+      Speaker 2:|||{{dialogue_turns.utterance[3]}}
+
+      {% else %}
+
+      |||
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: next_utterance_4_for_6
+    reference: Given the first three turns, generate the next utterance for dialogues
+      of length 6.
+  3e5a19e5-aa33-467a-bcd2-f84d99f32759: !Template
+    answer_choices: null
+    id: 3e5a19e5-aa33-467a-bcd2-f84d99f32759
+    jinja: '{% set context_init= ["", "Someone said", "He said", "She said", "They
+      said", "A friend asked me", "A colleague asked me"]|choice %}
+
+      {{context_init}}
+
+      {% if context_init =="" %}{{dialogue_turns.utterance[0]}}|||{{dialogue_turns.utterance[1]}}{%
+      else %}"{{dialogue_turns.utterance[0]}}". Which movie will you recommend?|||{{dialogue_turns.utterance[1]}}{%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: recommend_movie_1
+    reference: Given likes, recommend a movie.
+  76888d6e-76fa-47db-a8b3-9980f082df51: !Template
+    answer_choices: null
+    id: 76888d6e-76fa-47db-a8b3-9980f082df51
+    jinja: ' {% set context_init = ["", "Someone said", "He said", "She said", "They
+      asked", "A friend asked me", "A colleague asked me"]|choice %} {% set pronoun
+      = "he" %}
+
+      {% if dialogue_turns.utterance|length==6 %}
+
+      {% if "He" in context_init %}
+
+      {% set pronoun = "he" %}
+
+      {% elif "She" in context_init %}
+
+      {% set pronoun = "she" %}
+
+      {% elif "They" in context_init or "Someone" in context_init%}
+
+      {% set pronoun = "they" %}
+
+      {% elif "colleague" in context_init or "friend" in context_init %}
+
+      {% set pronoun = ["he","she","they"]|choice %} {%endif%}
+
+      {{context_init}}{% if context_init=="" %}{{dialogue_turns.utterance[4]}}|||{{dialogue_turns.utterance[5]}}
+
+      {% else %} "{{dialogue_turns.utterance[4]}}". Which movie do you think {{pronoun}}
+      will like?|||{{dialogue_turns.utterance[5]}}{% endif %}
+
+      {% else %}
+
+      |||
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: recommend_movie_2
+    reference: Given a single preference, recommend a movie. Only works for dialogues
+      with 6 utterances.
+  91f33bcf-3c0e-49e6-ae86-28f77e224734: !Template
+    answer_choices: null
+    id: 91f33bcf-3c0e-49e6-ae86-28f77e224734
+    jinja: ' {% set context_init= ["I am aware", "Someone is aware", "He is aware",
+      "She is aware", "They are aware", "A friend is aware", "A colleague is aware",
+      "A person is aware", "I know", "Someone knows", "He knows", "She knows", "They
+      know", "A friend knows", "A colleague knows", "A person knows"]|choice %}
+
+      {% set pronoun = "he" %} {% set pronoun_2 = "him" %} {% set choice_idx = 0 %}
+
+      {% if dialogue_turns.utterance|length==6 %}
+
+      {% if "He" in context_init %}
+
+      {% set pronoun = "he" %}
+
+      {% set pronoun_2 = "him" %}
+
+      {% elif "I" in context_init %}
+
+      {% set pronoun = "I" %}
+
+      {% set pronoun_2 = "me" %}
+
+      {% elif "She" in context_init %}
+
+      {% set pronoun = "she" %}
+
+      {% set pronoun_2 = "her" %}
+
+      {% elif "They" in context_init or "Someone" in context_init or "person" in context_init%}
+
+      {% set pronoun = "they" %}
+
+      {% set pronoun_2 = "them" %}
+
+      {% elif "colleague" in context_init or "friend" in context_init %}
+
+      {% set choice_idx = range(3)|list|choice %}
+
+      {% set pronoun = ["he","she","they"][choice_idx] %}
+
+      {% set pronoun_2 = ["him","her","them"][choice_idx] %}
+
+      {%endif%}
+
+      {{context_init}} that the movie: "{{dialogue_turns.utterance[1]}}", is related
+      to: {{dialogue_turns.utterance[3]}}.
+
+      Also, {% if pronoun!="I" %}{{pronoun}} said "{{dialogue_turns.utterance[4]}}".
+      Can you recommend a movie for {{pronoun_2}} please?|||{{dialogue_turns.utterance[5]}}{%else%}{{dialogue_turns.utterance[4]}}|||{{dialogue_turns.utterance[5]}}{%
+      endif %}
+
+      {% else %}
+
+      |||
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: recommend_movie_3
+    reference: Given a previous suggestion, some movie description, and a preference,
+      recommend a movie.
+  92cb9273-b89f-410f-a7f2-db8c84f42862: !Template
+    answer_choices: null
+    id: 92cb9273-b89f-410f-a7f2-db8c84f42862
+    jinja: ' {% set context_init= ["", "He said", "She said", "They said", "Someone
+      said", "A friend said", "A colleague said", "A person said"]|choice %}
+
+      {% set pronoun = "he" %} {% set pronoun_2 = "him" %} {% set choice_idx = 0 %}
+      {% if dialogue_turns.utterance|length==6 %}
+
+      {% if "He" in context_init %}
+
+      {% set pronoun = "he" %}
+
+      {% set pronoun_2 = "him" %}
+
+      {% elif "She" in context_init %}
+
+      {% set pronoun = "she" %}
+
+      {% set pronoun_2 = "her" %}
+
+      {% elif "They" in context_init or "Someone" in context_init or "person" in context_init%}
+
+      {% set pronoun = "they" %}
+
+      {% set pronoun_2 = "them" %}
+
+      {% elif "colleague" in context_init or "friend" in context_init %}
+
+      {% set choice_idx = range(3)|list|choice %}
+
+      {% set pronoun = ["he","she","they"][choice_idx] %}
+
+      {% set pronoun_2 = ["him","her","them"][choice_idx] %}
+
+      {%endif%}
+
+      {% if context_init!="" %}
+
+      {{context_init}} "{{dialogue_turns.utterance[0]}}". "{{dialogue_turns.utterance[4]}}",
+      {{pronoun}} added. Please recommend a movie for {{pronoun_2}}.|||{{dialogue_turns.utterance[5]}}
+
+      {%else%}
+
+      {{dialogue_turns.utterance[0]}} Also, {{dialogue_turns.utterance[4]}}|||{{dialogue_turns.utterance[5]}}
+
+      {% endif %}
+
+      {% else %}
+
+      |||
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: recommend_movie_4
+    reference: Given likes and preferences, recommend a movie.
+  de1179b3-b6d6-4acf-9b0a-82cb2fa9d58f: !Template
+    answer_choices: null
+    id: de1179b3-b6d6-4acf-9b0a-82cb2fa9d58f
+    jinja: 'Complete this movie-trivia-related dialogue between Speaker 1 and Speaker
+      2 by answering Speaker 1''s question as Speaker 2.
+
+
+      Speaker 1: {{dialogue_turns.utterance[0]}}
+
+
+      Speaker 2: {{dialogue_turns.utterance[1]}}
+
+
+      Speaker 1: {{dialogue_turns.utterance[2]}}
+
+
+      {% if dialogue_turns.utterance|length==6 %} Speaker 2: {{dialogue_turns.utterance[3]}}
+
+
+      Speaker 1: {{dialogue_turns.utterance[4]}}
+
+
+      {% endif %} Speaker 2:|||{{dialogue_turns.utterance[-1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: next_utterance_4_and_6
+    reference: Given the first dialogues, return the next utterance.
+  e37a6f9c-344c-4b85-a41f-85bb84bab934: !Template
+    answer_choices: null
+    id: e37a6f9c-344c-4b85-a41f-85bb84bab934
+    jinja: 'For the movie - {{dialogue_turns.utterance[1]}}, answer this question:
+
+
+      {{dialogue_turns.utterance[2]}}|||{{dialogue_turns.utterance[3]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: qa_about_movie
+    reference: Given the movie name and a question, answer the question.
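
The templates above all follow the same promptsource convention: one Jinja2 string renders both the prompt and the target, separated by "|||". A minimal rendering sketch to make that concrete (this is not promptsource's own loader, and the example fields are hypothetical):

import jinja2

env = jinja2.Environment()

# Hypothetical example shaped like the dialogue fields these templates expect.
example = {"dialogue_turns": {"utterance": ["Hi", "Inception", "Who directed it?", "Nolan"]}}

template_str = (
    "For the movie - {{dialogue_turns.utterance[1]}}, answer this question:\n\n"
    "{{dialogue_turns.utterance[2]}}|||{{dialogue_turns.utterance[3]}}"
)

rendered = env.from_string(template_str).render(**example)
prompt, target = [part.strip() for part in rendered.split("|||")]
print(prompt)  # For the movie - Inception, answer this question: Who directed it?
print(target)  # Nolan
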
diff --git a/promptsource/templates/medical_questions_pairs/templates.yaml b/promptsource/templates/medical_questions_pairs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4af24284f9b348b7275e7a8b322a88f16c9e4e71
--- /dev/null
+++ b/promptsource/templates/medical_questions_pairs/templates.yaml
@@ -0,0 +1,113 @@
+dataset: medical_questions_pairs
+templates:
+  18c92f97-0655-4f67-aca1-69f8e4fbb11e: !Template
+    answer_choices: not a paraphrase of the previous question. ||| a paraphrase of
+      the previous question.
+    id: 18c92f97-0655-4f67-aca1-69f8e4fbb11e
+    jinja: "In the context of healthcare questionnaires, it is often necessary to\
+      \ find out if two questions are paraphrases of one another. Given the following\
+      \ question:\n\nQuestion 1: {{question_1}}\n\nWhat inference can be made about\
+      \ the next question?\n\nQuestion 2: {{question_2}}\n\nThis question is \n\n\
+      |||\n\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v5
+    reference: ''
+  316f2ff7-45f8-4997-9c5f-dfe5fb7f9808: !Template
+    answer_choices: False ||| True
+    id: 316f2ff7-45f8-4997-9c5f-dfe5fb7f9808
+    jinja: "Question 1: {{question_1}}\n\nand\n\nQuestion 2: {{question_2}}\n\nIs\
+      \ it True or False that the two questions above are paraphrases of each other?\n\
+      \n|||\n\n {{answer_choices[label]}} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v6
+    reference: ''
+  535ed335-de9d-41df-a026-28487c832bfa: !Template
+    answer_choices: No they do not mean the same thing. ||| They mean the same thing.
+    id: 535ed335-de9d-41df-a026-28487c832bfa
+    jinja: 'Question: In the context of healthcare, do the following questions mean
+      the same thing?
+
+
+      Question 1: {{question_1}}
+
+
+      Question 2: {{question_2}}
+
+
+      |||
+
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v2
+    reference: template about question pattern
+  681dc0d2-a771-41ae-aa00-d1f59ab01197: !Template
+    answer_choices: not duplicates ||| duplicates
+    id: 681dc0d2-a771-41ae-aa00-d1f59ab01197
+    jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\nPick one of\
+      \ the following options:\nQuestions are {{\"not duplicates\"}} or {{\"duplicates\"\
+      }}:\n\n|||\n\n {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v8
+    reference: ''
+  7be2b267-8d5c-466b-9fd4-1fbbae442938: !Template
+    answer_choices: no ||| yes
+    id: 7be2b267-8d5c-466b-9fd4-1fbbae442938
+    jinja: "Question 1: {{question_1}}\n\nQuestion 2: {{question_2}}\n\nQuestion:\
+      \ Is Question 1 asking the same question as Question 2? Yes or No?\n\n\n|||\n\
+      \n {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v1
+    reference: context of healthcare
+  854ebbe0-8968-4967-a346-4e4d6f98cf73: !Template
+    answer_choices: False ||| True
+    id: 854ebbe0-8968-4967-a346-4e4d6f98cf73
+    jinja: "Question 1: {{question_1}}\n\nOne possible way of paraphrasing the same\
+      \ question is:\n\nQuestion 2: {{question_2}}\n\nTrue or False?\n\n|||\n\n\
+      \ {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v4
+    reference: ''
+  b388913a-9b0d-43a1-8bf9-83319ebf38b2: !Template
+    answer_choices: null
+    id: b388913a-9b0d-43a1-8bf9-83319ebf38b2
+    jinja: "Question 1: {{question_1}}\n\nand\n\nQuestion 2: {{question_2}}\n\nAfter\
+      \ reading the two questions above:\n\nA clinician will\n\n|||\n\n{% if label\
+      \ == 0 %} \n not agree that they are paraphrases of each other.\n{% elif label\
+      \ == 1 %}\n agree that they are paraphrases of each other.\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v7
+    reference: Using knowledge of dataset creation
+  c8df74ce-0ae7-4e70-9322-aaf9921ae3b1: !Template
+    answer_choices: dissimilar ||| similar
+    id: c8df74ce-0ae7-4e70-9322-aaf9921ae3b1
+    jinja: "The two questions are either {{\"similar\"}} or {{\"dissimilar\"}} questions\
+      \ in the medical context. Which is it?\n\n{{question_1}} \n\nand\n\n{{question_2}}\n\
+      \n\nThe two questions are \n|||\n\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic_v3
+    reference: ''
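
A note on the answer_choices fields above: promptsource splits that string on "|||" and the template indexes the resulting list with the integer class label, so label 0 maps to the left choice and label 1 to the right. A simplified sketch of that lookup (the real Template class also handles surrounding whitespace and permits Jinja inside the choices):

answer_choices = "not duplicates ||| duplicates"
choices = [c.strip() for c in answer_choices.split("|||")]

label = 1  # medical_questions_pairs: 0 = not duplicates, 1 = duplicates
print(choices[label])  # duplicates
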
diff --git a/promptsource/templates/meta_woz/dialogues/templates.yaml b/promptsource/templates/meta_woz/dialogues/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7bc49bfcc410c26be9eff315a716cdeed21dcbe
--- /dev/null
+++ b/promptsource/templates/meta_woz/dialogues/templates.yaml
@@ -0,0 +1,159 @@
+dataset: meta_woz
+subset: dialogues
+templates:
+  4b64c6e9-0aa0-431f-85b5-8367daa4773a: !Template
+    answer_choices: null
+    id: 4b64c6e9-0aa0-431f-85b5-8367daa4773a
+    jinja: "What does this conversation between a Chatbot and a client talk about\
+      \ ? \n{% for utterance in turns %}\n{{[\"Client\", \"Chatbot\"][loop.index %\
+      \ 2]}}: {{utterance}}\n{% endfor %}\n|||\n{{domain.replace('_', ' ') | lower\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: predict_domain_2
+    reference: ''
+  56151136-82a4-455b-98ed-aea6ee1c273d: !Template
+    answer_choices: null
+    id: 56151136-82a4-455b-98ed-aea6ee1c273d
+    jinja: '{% set count = namespace(value=0) %}
+
+      {% for i in range(range(2, turns|length) | random() - 1) %}
+
+      {{["AI Assistant", "Client"][i% 2]}}: {{turns[i]}}
+
+      {% set count.value= i + 1 %}
+
+      {% endfor %}
+
+      {{["AI Assistant", "Client"][ count.value % 2]}}:
+
+      |||
+
+      {{turns[count.value] }}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: predict_random
+    reference: ''
+  5e90f705-9d63-4917-acc9-3baabc6ee5e9: !Template
+    answer_choices: null
+    id: 5e90f705-9d63-4917-acc9-3baabc6ee5e9
+    jinja: "{% set rand_index= namespace(value=range(turns|length)|random()) %}\n\
+      {% for utterance in turns %}\n{% if loop.index0 == rand_index.value %}\n{{[\"\
+      Chatbot\", \"Human\"][loop.index0 % 2]}}: (blank)\n{% else %}\n{{[\"Chatbot\"\
+      , \"Human\"][loop.index0 % 2]}}: {{utterance}}\n{% endif %}\n{% endfor %}\n\
+      Fill in the blank \n|||\n{{turns[rand_index.value] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: predict_missing
+    reference: ''
+  77a9e854-08ef-4f2d-86f9-ed077f18b39d: !Template
+    answer_choices: null
+    id: 77a9e854-08ef-4f2d-86f9-ed077f18b39d
+    jinja: "This conversation is between an AI assistant and a human. What is the\
+      \ human inquiring about?\n{% for utterance in turns %}\n{{[\"Human\", \"AI\
+      \ assistant\"][loop.index % 2]}}: {{utterance}}\n{% endfor %}\n|||\n{{domain.replace('_',\
+      \ ' ') | lower }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: predict_domain_3
+    reference: ''
+  9150f1e0-8b9e-4f24-bc58-1cbb230cb8d9: !Template
+    answer_choices: null
+    id: 9150f1e0-8b9e-4f24-bc58-1cbb230cb8d9
+    jinja: "This conversation is between an AI assistant and a human. what does it\
+      \ talk about ? \n{% for utterance in turns %}\n{{[\"Human\", \" AI assistant\"\
+      ][loop.index % 2]}}: {{utterance}}\n{% endfor %}\n|||\n{{domain.replace('_',\
+      \ ' ') | lower }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: predict_domain_1
+    reference: ''
+  98b0697f-3144-40b6-b6ae-5d57c3f08db3: !Template
+    answer_choices: null
+    id: 98b0697f-3144-40b6-b6ae-5d57c3f08db3
+    jinja: "Chatbot: {{turns[0]}}\n\nHuman: {{turns[1]}}\n\nChatbot: \n|||\n{{turns[2]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: predict_next_stmt_1
+    reference: ''
+  a02a980d-630f-4845-8b4d-b0eb072110e9: !Template
+    answer_choices: null
+    id: a02a980d-630f-4845-8b4d-b0eb072110e9
+    jinja: '{% for utterance in turns[:-1] %}
+
+      {{["User", "Bot"][loop.index % 2]}}: {{utterance}}
+
+      {% endfor %}
+
+      {{["User", "Bot"][ turns | length % 2]}}:
+
+      |||
+
+      {{turns[-1]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: predict_last_stmt_3
+    reference: ''
+  a4a8c7a1-1747-4560-8365-e998b1d2cfdf: !Template
+    answer_choices: null
+    id: a4a8c7a1-1747-4560-8365-e998b1d2cfdf
+    jinja: '{% for utterance in turns[:-1] %}
+
+      {{["Human", "Chatbot"][loop.index % 2]}}: {{utterance}}
+
+      {% endfor %}
+
+      {{["Human", "Chatbot"][ turns | length % 2]}}:
+
+      |||
+
+      {{turns[-1]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: predict_last_stmt_1
+    reference: ''
+  eba8d3ba-88f8-4257-a252-b02eafcfc463: !Template
+    answer_choices: null
+    id: eba8d3ba-88f8-4257-a252-b02eafcfc463
+    jinja: '{% for utterance in turns[:-1] %}
+
+      {{["Client", "AI Assistant"][loop.index % 2]}}: {{utterance}}
+
+      {% endfor %}
+
+      {{["Client", "AI Assistant"][ turns | length % 2]}}:
+
+      |||
+
+      {{turns[-1]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: predict_last_stmt_2
+    reference: ''
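
The predict_random template above uses Jinja2's namespace() to carry a counter out of a for loop, because a plain {% set %} inside a loop is scoped to that single iteration. A minimal reproduction of the trick (requires Jinja2 >= 2.10, where namespace() was introduced):

import jinja2

tmpl = jinja2.Environment().from_string(
    "{% set count = namespace(value=0) %}"
    "{% for i in range(k) %}{% set count.value = i + 1 %}{% endfor %}"
    "next index: {{ count.value }}"
)
print(tmpl.render(k=3))  # next index: 3 -- the loop's final value survives
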
diff --git a/promptsource/templates/mocha/templates.yaml b/promptsource/templates/mocha/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..19e9528b8d52953777e0011a0ad0f88c401a329d
--- /dev/null
+++ b/promptsource/templates/mocha/templates.yaml
@@ -0,0 +1,160 @@
+dataset: mocha
+templates:
+  1c390ee6-fab9-4b16-8028-2649fca56866: !Template
+    answer_choices: null
+    id: 1c390ee6-fab9-4b16-8028-2649fca56866
+    jinja: "On a scale of 1 to 5, how similar are these two sentences \n\n(1) \"{{candidate}}\"\
+      \n\n(2) \"{{reference}}\" \n\ngiven the question of \"{{ question }}\" and the\
+      \ context of \"{{ context }}\"?\n\n{% if candidate2 %}\nFollow-up question:\
+      \ What about these two sentences: \n\n(1) \"{{reference}}\"\n\n(2) \"{{ candidate2\
+      \ }}\"\n{% endif %}\n|||\n{{ score }} \n\n{{ score2 if candidate2 else \"\"\
+      \ }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: (Regression) Scoring Candidate against Reference
+    reference: Similarity measure between candidate and reference answers (in a regression
+      manner)
+  2816084e-0193-4284-9a4f-9de4ae03e9d6: !Template
+    answer_choices: null
+    id: 2816084e-0193-4284-9a4f-9de4ae03e9d6
+    jinja: "Given the passage and the answers given below, generate a relevant question\
+      \ and pick the corresponding answer.\n\nPassage: {{ context }}\n\nAnswer 1 (Gold):\
+      \ {{ reference }}\n\nAnswer 2: {{ candidate }}\n\n{% if candidate2 %}\nAnswer\
+      \ 3: {{ candidate2 }}\n{% endif %} \n|||\n{{ question }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate Question
+    reference: Given passage and the answers, generate a question for the gold answer.
+  31e49d18-800f-4d16-8d84-86509db30499: !Template
+    answer_choices: null
+    id: 31e49d18-800f-4d16-8d84-86509db30499
+    jinja: "{% if score != 3 %}\nPerson A: {{ question }}\n\nPerson B: {{ reference\
+      \ }}\n\nPerson C: {{ candidate }}\n\n{% if candidate2 and score2 != 3 %}\nPerson\
+      \ D: {{ candidate2 }}\n{% endif %}\n\nDoes Person B give a similar answer to\
+      \ Person C? {{ \" What about Person B's answer to Person D's?\" if candidate2\
+      \ else \"\" }} Answer \"similar\" or \"not similar\".\n\n\n|||\n{{ [\"not similar\"\
+      , \"similar\"][score > 3] }} \n\n{{[\"not similar\", \"similar\"][score2 > 3]\
+      \ if candidate2 != \"\" else \"\"}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: (Classification) Scoring Candidate against Reference w/o Context
+    reference: Similarity measure between candidate and reference answers (in a classification
+      manner)
+  5098f807-5558-4d19-af12-7bb87cbc59f0: !Template
+    answer_choices: null
+    id: 5098f807-5558-4d19-af12-7bb87cbc59f0
+    jinja: "Question: {{ question }}\n\nAnswer A: \"{{reference}}\"\n\nAnswer B: \"\
+      {{candidate}}\" \n\n{% if candidate2 %}\nAnswer C: \"{{ candidate2 }}\"\n{%\
+      \ endif %}\n\nGive the similarity measure (on a scale of 1 to 5) for answers\
+      \ A and B. {{ \"Do the same for answers A and C.\" if candidate2 else \"\" }}\n\
+      \n|||\n{{ score }} \n\n{{ score2 if candidate2 else \"\" }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: (Regression) Scoring Candidate against Reference w/o Context
+    reference: Similarity measure between candidate and reference answers (in a regression
+      manner)
+  6269d541-6e7b-48c1-ae7a-2808385c40c6: !Template
+    answer_choices: null
+    id: 6269d541-6e7b-48c1-ae7a-2808385c40c6
+    jinja: "{% if score != 3 %}\nPassage: {{ context }}\n\nQuestion: {{ question }}\n\
+      \nAnswer: {{ reference }}\n\nIs the answer \"{{ candidate }}\" similar to the\
+      \ answer above? Answer yes or no. \n\n{% if candidate2 and score2 != 3 %}\n\
+      Is the answer \"{{ candidate2 }}\" similar to the answer above? Answer yes or\
+      \ no. \n{% endif %}\n|||\n{{ [\"no\", \"yes\"][score > 3] }} \n\n{{[\"no\",\
+      \ \"yes\"][score2 > 3] if candidate2 != \"\" else \"\"}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: (Classification) Scoring Candidate against Reference
+    reference: Similarity measure between candidate and reference answers (in a classification
+      manner)
+  6570aa7f-de3d-489e-8565-72fb535b1f10: !Template
+    answer_choices: null
+    id: 6570aa7f-de3d-489e-8565-72fb535b1f10
+    jinja: "Sentence (1): \"{{candidate}}\"\n\nSentence (2): \"{{reference}}\" \n\n\
+      {% if candidate2 %}\nSentence (3): \"{{ candidate2 }}\"\n{% endif %}\n\nHow\
+      \ similar are Sentence (1) and (2)?{{ \" What about Sentence (2) and (3)?\"\
+      \ if candidate2 else \"\" }} Output the result value between 1 (completely different)\
+      \ and 5 (identical). \n|||\n{{ score }} \n\n{{ score2 if candidate2 else \"\"\
+      \ }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: (Regression) Scoring Candidate against Reference w/o Question and Context
+    reference: Similarity measure between candidate and reference answers (in a regression
+      manner)
+  7ebdd3bc-4896-425b-b8c2-3e4ea3944de8: !Template
+    answer_choices: null
+    id: 7ebdd3bc-4896-425b-b8c2-3e4ea3944de8
+    jinja: '{{ context }}
+
+
+      Given the passage above, what is the answer to the question "{{ question }}"?
+
+
+      You can refer to the following candidate answer(s):
+
+
+      Candidate Answer 1: {{ candidate }}
+
+
+      Candidate Answer 2: {{ candidate2 if candidate2 else "-" }}
+
+      |||
+
+      {{ reference }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate Correct Answer using Noisy Candidate
+    reference: Given the passage, the question, and the candidate answer (that may
+      be incorrect), generate the correct answer.
+  900786a8-1841-438e-b79a-9ceb350c3271: !Template
+    answer_choices: null
+    id: 900786a8-1841-438e-b79a-9ceb350c3271
+    jinja: "{% if score != 3 %}\nDoes the pair of sentences have similar meanings?\
+      \ Answer yes or no.\n\n\"{{ reference }}\" / \"{{ candidate }}\"\n\n{% if candidate2\
+      \ and score2 != 3 %}\n\"{{ reference }}\" / \"{{ candidate2 }}\"\n{% endif %}\n\
+      |||\n{{ [\"no\", \"yes\"][score > 3] }} \n\n{{[\"no\", \"yes\"][score2 > 3]\
+      \ if candidate2 != \"\" else \"\"}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: (Classification) Scoring Candidate against Reference w/o Context and Question
+    reference: Similarity measure between candidate and reference answers (in a classification
+      manner)
+  c06f4d3a-5a95-4b6a-b339-32391c8e6d94: !Template
+    answer_choices: null
+    id: c06f4d3a-5a95-4b6a-b339-32391c8e6d94
+    jinja: "{% set candidates = [] %}\n{% set new_candidates = [] %}\n{{ candidates.append(reference)\
+      \ or ''}}\n{{ candidates.append(candidate) or ''}}\n\n{% if candidate2 %}\n\
+      {{ candidates.append(candidate2) }}\n{% endif %}\n\n{# arbitrarily right shift\
+      \ the candidates list so the answer listed out in prompt looks random #}\n{%\
+      \ set length_text = context | length %}\n\n{% if length_text % 2 == 1 %}\n{%\
+      \ set new_candidates = candidates[-1:] + candidates[:-1] %}\n{% else %}\n{%\
+      \ set new_candidates = candidates[:] %}\n{% endif %}\n\nPassage: {{ context\
+      \ }}\n\nQuestion: {{ question }}\n\nAnswers: \n\n{% for cd in new_candidates\
+      \ %}\n{{loop.index}}. {{cd}}\n{% endfor %}\n\n\nSelect all the correct answers\
+      \ to the question, given the passage above.\n|||\n{% for cd in new_candidates\
+      \ %}\n{% if cd == reference %}\n{{ loop.index }}\n{% elif cd == candidate and\
+      \ score >= 3%}\n{{ loop.index }}\n{% elif cd == candidate2 and score2 >= 3%}\n\
+      {{ loop.index }}\n{% endif %}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Pick Correct Answers
+    reference: Given passage, question, and all possible answers, pick correct answers
+      (using similarity cutoff >=3 to reference answer)
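
The "Pick Correct Answers" template above avoids always listing the reference answer first by rotating the candidate list deterministically: when the context length is odd, the list is right-shifted by one. A Python sketch of that Jinja logic, under the same field names:

def order_candidates(context, reference, candidate, candidate2=None):
    candidates = [reference, candidate] + ([candidate2] if candidate2 else [])
    if len(context) % 2 == 1:
        return candidates[-1:] + candidates[:-1]  # right-shift by one
    return candidates[:]

print(order_candidates("odd", "ref", "cand"))   # ['cand', 'ref'] (length 3 is odd)
print(order_candidates("even", "ref", "cand"))  # ['ref', 'cand']
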
diff --git a/promptsource/templates/movie_rationales/templates.yaml b/promptsource/templates/movie_rationales/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7b363297f573e3840eabef7f6c0ae51c8fb9273b
--- /dev/null
+++ b/promptsource/templates/movie_rationales/templates.yaml
@@ -0,0 +1,86 @@
+dataset: movie_rationales
+templates:
+  3ea71512-c48a-4898-8e29-6169a7a00752: !Template
+    answer_choices: Negative ||| Positive
+    id: 3ea71512-c48a-4898-8e29-6169a7a00752
+    jinja: "Review: {{review}} \n===\nIs this review negative or positive? |||\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Standard binary sentiment analysis
+    reference: Standard binary sentiment analysis
+  5aaa7d8b-631a-4972-aeca-20a4e0518a60: !Template
+    answer_choices: Negative ||| Positive
+    id: 5aaa7d8b-631a-4972-aeca-20a4e0518a60
+    jinja: 'Evidences:
+
+      - {{ evidences | join("\n- ") }}
+
+      ===
+
+      Based on these review excerpts, is the review positive or negative? ||| {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Evidences sentiment classification
+    reference: Only taking the evidences as input
+  b953c90c-722a-487e-ab8d-c83ae45de139: !Template
+    answer_choices: Negative ||| Positive
+    id: b953c90c-722a-487e-ab8d-c83ae45de139
+    jinja: 'Review: {{review}}
+
+
+      Highlighted extracts:
+
+      - {{ evidences | join("\n- ") }}
+
+      ===
+
+      Based on this review and the highlighted extracts from the review, decide whether
+      this review is negative or positive. ||| {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Evidences + review
+    reference: Classification based both on evidences and review
+  e517bce9-5820-4f20-ad86-b2e3db9e6731: !Template
+    answer_choices: null
+    id: e517bce9-5820-4f20-ad86-b2e3db9e6731
+    jinja: 'Review: {{review}}
+
+      ===
+
+      This review is {% if label == 0 %}negative{% else %}positive{% endif %}. Extract
+      from it the passages that indicate this. |||
+
+      - {{ evidences | join("\n- ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate evidences
+    reference: From the review, extract the spans of text that indicate whether the
+      review is positive or negative.
+  f11ea73a-3a03-43d8-90d8-4da3905161c2: !Template
+    answer_choices: null
+    id: f11ea73a-3a03-43d8-90d8-4da3905161c2
+    jinja: 'Review: {{review}}
+
+      ====
+
+      Is this review negative or positive? Extract from the review the passages that
+      prove this choice. |||
+
+      {{["Negative", "Positive"][label]}}
+
+      - {{ evidences | join("\n- ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate evidences and sentiment
+    reference: From the review, determine whether it is negative or positive and extract
+      the passages supporting this choice
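
A quick note on the `- {{ evidences | join("\n- ") }}` pattern used throughout this file: Jinja2 decodes escape sequences in string literals, so the "\n" that YAML preserves verbatim becomes a real newline at render time, and the literal leading "- " supplies the first bullet. A sketch of the rendered output:

import jinja2

tmpl = jinja2.Environment().from_string(r'- {{ evidences | join("\n- ") }}')
print(tmpl.render(evidences=["great acting", "terrible pacing"]))
# - great acting
# - terrible pacing
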
diff --git a/promptsource/templates/multi_news/templates.yaml b/promptsource/templates/multi_news/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23fffa2123964e88ba5eac3f0ac2b2a46ec23b18
--- /dev/null
+++ b/promptsource/templates/multi_news/templates.yaml
@@ -0,0 +1,153 @@
+dataset: multi_news
+templates:
+  12269bd1-1c3a-4865-9702-892782b593d9: !Template
+    answer_choices: null
+    id: 12269bd1-1c3a-4865-9702-892782b593d9
+    jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+      "") | list %}
+
+      What are the key points across these news articles:
+
+      {% for doc in docs %}
+
+
+      Article: {{doc}}
+
+      {% endfor %}
+
+      |||
+
+      {{summary[2:]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: what are the key points
+    reference: ''
+  940d0ce4-c1ef-4453-a47b-1abaaf811160: !Template
+    answer_choices: null
+    id: 940d0ce4-c1ef-4453-a47b-1abaaf811160
+    jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+      "") | list %}
+
+      Synthesize these documents into a single one:
+
+      {% for doc in docs %}
+
+
+      - {{doc}}
+
+      {% endfor %}
+
+      |||
+
+      {{summary[2:]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: synthesize
+    reference: ''
+  9ab370ad-2b89-4d2a-bb40-ccc31accefad: !Template
+    answer_choices: null
+    id: 9ab370ad-2b89-4d2a-bb40-ccc31accefad
+    jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+      "") | list %}
+
+      I want to edit the following articles into a more concise summary:
+
+      {% for doc in docs %}
+
+
+      Article: {{doc}}
+
+      {% endfor %}
+
+      |||
+
+      {{summary[2:]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: summary scenario
+    reference: ''
+  b15485f5-2bd9-4ed4-98ce-4b241a341f99: !Template
+    answer_choices: null
+    id: b15485f5-2bd9-4ed4-98ce-4b241a341f99
+    jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+      "") | list %}
+
+      Write a summary of the following articles:
+
+      {% for doc in docs %}
+
+
+      Document: {{doc}}
+
+      {% endfor %}
+
+      |||
+
+      {{summary[2:]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: summarize
+    reference: ''
+  bc910e51-c0a9-473c-aa85-adcab21b9ba9: !Template
+    answer_choices: null
+    id: bc910e51-c0a9-473c-aa85-adcab21b9ba9
+    jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+      "") | list%}
+
+      Write an expanded news article with plausible details from the following summary:
+
+      {{summary[2:]}}
+
+      |||
+
+      {{docs | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: expand (reverse task)
+    reference: ''
+  d5a4bb2a-634a-4e9a-9f1f-b0803894ca0f: !Template
+    answer_choices: null
+    id: d5a4bb2a-634a-4e9a-9f1f-b0803894ca0f
+    jinja: '{% set docs = document.split("3ed2dface8203c4c9dfb1a5dc58e41e0||") | reject("equalto",
+      "") | list %}
+
+      I''m trying to distill these articles down into one:
+
+      {% for doc in docs %}
+
+
+      Article: {{doc}}
+
+      {% endfor %}
+
+      |||
+
+      {{summary[2:]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: distill
+    reference: ''
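
All six multi_news templates above rely on the same two quirks of the dataset: the source articles arrive concatenated with the fixed sentinel string "3ed2dface8203c4c9dfb1a5dc58e41e0||", and every summary starts with a two-character prefix that the templates drop via summary[2:]. A sketch of the equivalent preprocessing in Python (the example strings are hypothetical):

SENTINEL = "3ed2dface8203c4c9dfb1a5dc58e41e0||"

def preprocess(example):
    # Split the concatenated document and drop empty fragments, as
    # `split(...) | reject("equalto", "") | list` does in the templates.
    docs = [d for d in example["document"].split(SENTINEL) if d != ""]
    summary = example["summary"][2:]  # strip the leading two-character marker
    return docs, summary

docs, summary = preprocess(
    {"document": "article one" + SENTINEL + "article two", "summary": "- Key points."}
)
print(docs)     # ['article one', 'article two']
print(summary)  # Key points.
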
diff --git a/promptsource/templates/multi_nli/templates.yaml b/promptsource/templates/multi_nli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1d9dd09590fdf00847b63109082e54942f425179
--- /dev/null
+++ b/promptsource/templates/multi_nli/templates.yaml
@@ -0,0 +1,87 @@
+dataset: multi_nli
+templates:
+  4100d721-b9c1-41ef-9392-3eeb64e8a4cd: !Template
+    answer_choices: must be true ||| might be true ||| must be false
+    id: 4100d721-b9c1-41ef-9392-3eeb64e8a4cd
+    jinja: Given that {{premise}}, it {{"must be true, might be true, or must be false"}}
+      that {{hypothesis}}? ||| It {{ answer_choices[label] }}.
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 it must be true that\u2026"
+    reference: 'Maybe a little verbose for a generative model, but anecdotally this
+      is the most natural way of how I say an NLI sentence pair out loud to humans.
+      Caveat: NLI annotations are not meant to be strictly truth-conditional entailment,
+      so "must" is not ideal.'
+  6c6d824a-89c8-48cc-a4b5-cab04d649117: !Template
+    answer_choices: True ||| Neither ||| False
+    id: 6c6d824a-89c8-48cc-a4b5-cab04d649117
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: GPT-3 style
+    reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+      is no task identifying tokens like "anli R1: ".'
+  b2dcedb3-265e-4dfa-bbd2-9d9d992bd389: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: b2dcedb3-265e-4dfa-bbd2-9d9d992bd389
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+      ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 does it follow that\u2026 "
+    reference: "\"Does it follow that\" could be replaced with \"can we infer that\u2026\
+      \ \", \"is it guaranteed that\u2026\", etc. Ideally there should be a question\
+      \ mark after \"does it follow that {hypothesis}?\", but the hypothesis string\
+      \ often comes with ending punctuations of its own."
+  ca5a9209-47ed-4ca9-9b03-7ea909f61d96: !Template
+    answer_choices: No ||| Neutral ||| Yes
+    id: ca5a9209-47ed-4ca9-9b03-7ea909f61d96
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 contradict Sentence 2? Yes, No, or Neutral? |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: does S1 contradict S2?
+    reference: Copied from Victor's prompts for XNLI.
+  d067f960-75d9-4fed-bf54-6ddaff123a57: !Template
+    answer_choices: Yes ||| Neutral ||| No
+    id: d067f960-75d9-4fed-bf54-6ddaff123a57
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 entail Sentence 2? Yes, No, or Neutral? |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: does S1 entail S2?
+    reference: Copied from Victor's prompts for XNLI.
+  f1a17d8b-78fb-4300-bc13-8c4572a091fa: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: f1a17d8b-78fb-4300-bc13-8c4572a091fa
+    jinja: '{{premise}} Based on the previous passage, is it true that {{hypothesis}}
+      Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
diff --git a/promptsource/templates/multi_nli_mismatch/templates.yaml b/promptsource/templates/multi_nli_mismatch/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8c81bf54ab54f3752139961cb1a73502874fba24
--- /dev/null
+++ b/promptsource/templates/multi_nli_mismatch/templates.yaml
@@ -0,0 +1,72 @@
+dataset: multi_nli_mismatch
+templates:
+  08024afb-d156-46eb-9697-d3f6fcfeb460: !Template
+    answer_choices: null
+    id: 08024afb-d156-46eb-9697-d3f6fcfeb460
+    jinja: "{{premise}}\nQuestion: {{hypothesis}} Yes, maybe, or no? ||| \n{{ {\"\
+      entailment\": \"Yes\", \"neutral\": \"Maybe\", \"contradiction\": \"No\"}[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: GPT3-style
+    reference: Similar to the Multi-NLI template
+  2d88a6db-69c0-4a1b-b63d-c296827fb3ca: !Template
+    answer_choices: null
+    id: 2d88a6db-69c0-4a1b-b63d-c296827fb3ca
+    jinja: "{{premise}} Based on the previous passage, is it true that {{hypothesis}}\
+      \ Yes, maybe, or no?\n||| \n{{ {\"entailment\": \"Yes\", \"neutral\": \"Maybe\"\
+      , \"contradiction\": \"No\"}[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Based on the previous passage
+    reference: Similar to the MNLI template
+  33cd3db0-ec12-4fe4-8e67-2ed2547e3102: !Template
+    answer_choices: null
+    id: 33cd3db0-ec12-4fe4-8e67-2ed2547e3102
+    jinja: "Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or\
+      \ maybe?\n||| \n{{ {\"entailment\": \"Yes\", \"neutral\": \"Maybe\", \"contradiction\"\
+      : \"No\"}[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 does it follow that\u2026 "
+    reference: Similar to the MNLI template
+  eb305885-3cf3-49b0-92de-6b9f51628007: !Template
+    answer_choices: null
+    id: eb305885-3cf3-49b0-92de-6b9f51628007
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 entail Sentence 2? Yes, No, or Neutral? |||
+
+      {{ {"entailment": "Yes", "neutral": "Neutral", "contradiction": "No"}[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Does S1 entail S2?
+    reference: Similar to the MNLI template
+  ece94a4c-cfa1-4eb5-9af7-204d0c5791ea: !Template
+    answer_choices: null
+    id: ece94a4c-cfa1-4eb5-9af7-204d0c5791ea
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 contradict Sentence 2? Yes, No, or Neutral? |||
+
+      {{ {"entailment": "No", "neutral": "Neutral", "contradiction": "Yes"}[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Does S1 contradict S2?
+    reference: Similar to the MNLI template
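
Unlike the multi_nli templates earlier in this diff, which index answer_choices with an integer label, these templates look the label up in an inline dict -- consistent with multi_nli_mismatch exposing label as a string (an assumption inferred from the templates themselves). A side-by-side sketch of the two lookup styles:

# multi_nli style: an integer label indexes a list parsed from answer_choices
int_label = 0
print(["Yes", "Maybe", "No"][int_label])  # Yes

# multi_nli_mismatch style: a string label keys an inline mapping
str_label = "entailment"
print({"entailment": "Yes", "neutral": "Maybe", "contradiction": "No"}[str_label])  # Yes
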
diff --git a/promptsource/templates/multi_x_science_sum/templates.yaml b/promptsource/templates/multi_x_science_sum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f185e85b569e6a5f286ef947e572efd876f4164
--- /dev/null
+++ b/promptsource/templates/multi_x_science_sum/templates.yaml
@@ -0,0 +1,77 @@
+dataset: multi_x_science_sum
+templates:
+  2bca0197-e3d4-4870-bd95-178411e52e09: !Template
+    answer_choices: null
+    id: 2bca0197-e3d4-4870-bd95-178411e52e09
+    jinja: 'Use the reference abstracts to generate related work:
+
+
+      {{ref_abstract["abstract"]}}  |||
+
+      {{related_work}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: ref_relatedwork
+    reference: ''
+  3bd082cb-4e28-4eb7-9fa2-dd03f1f86219: !Template
+    answer_choices: null
+    id: 3bd082cb-4e28-4eb7-9fa2-dd03f1f86219
+    jinja: 'Given the abstract of a paper, provide some related work for readers to
+      learn more:
+
+      {{abstract}} |||
+
+      {{related_work}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: abstract_relatedwork
+    reference: ''
+  af4d550e-54b8-471e-97af-2b2c50a1382e: !Template
+    answer_choices: null
+    id: af4d550e-54b8-471e-97af-2b2c50a1382e
+    jinja: 'Use the related work to guess the abstract:
+
+
+      {{related_work}} |||
+
+      {{abstract}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: relatedwork_abstract
+    reference: ''
+  b9eb67b4-415b-4a38-a90a-9dee3ae385d7: !Template
+    answer_choices: null
+    id: b9eb67b4-415b-4a38-a90a-9dee3ae385d7
+    jinja: 'Use the abstract and reference abstracts to generate related work:
+
+
+      {{abstract}}, {{ref_abstract["abstract"]}} |||
+
+      {{related_work}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: related_work
+    reference: ''
+  f59d7e5a-5982-467f-b451-91154e311666: !Template
+    answer_choices: null
+    id: f59d7e5a-5982-467f-b451-91154e311666
+    jinja: 'Use reference papers to guess the abstract:
+
+
+      {{ref_abstract["abstract"]}} |||
+
+      {{abstract}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: ref_abstract
+    reference: ''
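
These templates interpolate ref_abstract["abstract"] directly. In the Hugging Face dataset that field is a list of reference abstracts (an assumption based on its plural use here), so Jinja would render the list's repr; a loader might prefer joining the list first, as in this hypothetical sketch:

example = {
    "abstract": "We propose ...",
    "ref_abstract": {"abstract": ["Prior work A ...", "Prior work B ..."]},
    "related_work": "Earlier approaches ...",
}

refs = "\n\n".join(example["ref_abstract"]["abstract"])
prompt = "Use the reference abstracts to generate related work:\n\n" + refs
target = example["related_work"]
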
diff --git a/promptsource/templates/mwsc/templates.yaml b/promptsource/templates/mwsc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2421df42b4cb0f2894382dd795a03d2384aee29
--- /dev/null
+++ b/promptsource/templates/mwsc/templates.yaml
@@ -0,0 +1,59 @@
+dataset: mwsc
+templates:
+  66c3e53a-2f2f-4ab4-b17b-ca42535d4ea1: !Template
+    answer_choices: null
+    id: 66c3e53a-2f2f-4ab4-b17b-ca42535d4ea1
+    jinja: '{{ question|trim(''?'') }} in the sentence "{{ sentence|trim(''.'') }}"?
+      ||| {{ answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: in-the-sentence-question-first
+    reference: ''
+  8d4f3463-d64b-43be-b0ed-2455cb99e017: !Template
+    answer_choices: null
+    id: 8d4f3463-d64b-43be-b0ed-2455cb99e017
+    jinja: If I were to say "{{sentence}}" and then ask you "{{ question }}", what
+      do you think is the correct answer out of "{{ options|join('" and "')}}"? |||
+      {{ answer }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: what-think
+    reference: ''
+  a37a2745-c815-4f3a-8f78-3da2fceae7fe: !Template
+    answer_choices: null
+    id: a37a2745-c815-4f3a-8f78-3da2fceae7fe
+    jinja: In the sentence "{{ sentence|trim('.') }}", {{ question[0]|lower }}{{ question[1:]
+      }} ||| {{ answer }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: in-the-sentence
+    reference: ''
+  ad4b74f6-6b2f-40a8-8189-4ada58d64fd4: !Template
+    answer_choices: null
+    id: ad4b74f6-6b2f-40a8-8189-4ada58d64fd4
+    jinja: "{{sentence}} {{ question }} Was it \n\"{{options|join('\" or \"')}}\"\
+      ? ||| {{ answer }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: options-or
+    reference: ''
+  f0e01268-c83b-4785-b593-48eb4f9173cd: !Template
+    answer_choices: null
+    id: f0e01268-c83b-4785-b593-48eb4f9173cd
+    jinja: '{{ sentence }} Would "{{ options[0] }}" be correct if I were to ask you
+      {{question[0]|lower }}{{ question[1:] }} ||| {% if answer == options[0] %} Yes
+      {% else %} No {% endif %}   '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: is-correct
+    reference: ''
diff --git a/promptsource/templates/narrativeqa/templates.yaml b/promptsource/templates/narrativeqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..50882f7520e8601976cbe36eef6a04fed4696bd4
--- /dev/null
+++ b/promptsource/templates/narrativeqa/templates.yaml
@@ -0,0 +1,133 @@
+dataset: narrativeqa
+templates:
+  3b53c95c-022b-4a51-946a-6c88b962892a: !Template
+    answer_choices: null
+    id: 3b53c95c-022b-4a51-946a-6c88b962892a
+    jinja: 'Synopsis: {{ document.summary.text }}
+
+
+      Answer the question.
+
+
+      {{ question.text }}
+
+
+      Full text: {{ document.text }} |||
+
+      {{answers | map(attribute="text") | list | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_05
+    reference: Text + Summary
+  62df1726-be83-4403-9483-732da1174cf7: !Template
+    answer_choices: null
+    id: 62df1726-be83-4403-9483-732da1174cf7
+    jinja: '{{ document.text }}
+
+
+      Using the above text, answer the following question.
+
+
+      {{ question.text }} |||
+
+      {{answers | map(attribute="text") | list | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_01
+    reference: Text only
+  87cb2e86-0764-412a-ba2d-fe3172997a25: !Template
+    answer_choices: null
+    id: 87cb2e86-0764-412a-ba2d-fe3172997a25
+    jinja: '{{ document.text }}
+
+
+      Summarize the given document. |||
+
+      {{ document.summary.text }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_06
+    reference: Text summarization
+  adfa9f6d-5268-472f-b435-c4558f199961: !Template
+    answer_choices: null
+    id: adfa9f6d-5268-472f-b435-c4558f199961
+    jinja: "Full text: {{ document.text }}\n\nQuestion: {{ question.text }} \n\nCan\
+      \ you extract the answer to the above question from the full text?\n|||\n{{answers\
+      \ | map(attribute=\"text\") | list | choice }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_03
+    reference: Text only
+  c1170dbc-8b83-4797-aa32-ccafcfddad9e: !Template
+    answer_choices: null
+    id: c1170dbc-8b83-4797-aa32-ccafcfddad9e
+    jinja: 'Answer the question using the information given in the synopsis below.
+
+
+      {{ document.summary.text }}
+
+
+      Question: {{ question.text }} |||
+
+      {{answers | map(attribute="text") | list | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_02
+    reference: Summary only
+  ce20a6d2-df6b-4279-bde9-6543ef23ecff: !Template
+    answer_choices: null
+    id: ce20a6d2-df6b-4279-bde9-6543ef23ecff
+    jinja: 'Below is the summary of a document.
+
+
+      {{ document.summary.text }}
+
+
+      What is the answer to the following query?
+
+
+      {{ question.text }} |||
+
+      {{answers | map(attribute="text") | list | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_04
+    reference: Summary only
+  e750f922-2e1b-45c8-98de-3ada1fbde16b: !Template
+    answer_choices: null
+    id: e750f922-2e1b-45c8-98de-3ada1fbde16b
+    jinja: '{{ document.text }}
+
+
+      Can you briefly recapitulate the above document? |||
+
+      {{ document.summary.text }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_08
+    reference: Text Summarization
+  f6bedb53-fc87-47f3-94b2-07adb0de2e42: !Template
+    answer_choices: null
+    id: f6bedb53-fc87-47f3-94b2-07adb0de2e42
+    jinja: "State the main points mentioned in the below text.\n\n{{ document.text\
+      \ }}\n |||\n{{ document.summary.text }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_07
+    reference: Text Summarization
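
Every question-answering template above ends with `answers | map(attribute="text") | list | choice`, which picks one gold answer at random; `choice` here is a custom filter that promptsource registers on its Jinja environment (it is not a Jinja2 built-in). A plain-Python sketch of that pipeline, with hypothetical answers:

import random

answers = [{"text": "Paris"}, {"text": "The French capital"}]
texts = [a["text"] for a in answers]  # map(attribute="text") | list
print(random.choice(texts))           # choice -- one gold answer at random
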
diff --git a/promptsource/templates/ncbi_disease/templates.yaml b/promptsource/templates/ncbi_disease/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5740a5f30f871ab5656c0d8fa315b29702216cf4
--- /dev/null
+++ b/promptsource/templates/ncbi_disease/templates.yaml
@@ -0,0 +1,226 @@
+dataset: ncbi_disease
+templates:
+  04458e59-37f1-48dc-bb20-823e836a8c44: !Template
+    answer_choices: null
+    id: 04458e59-37f1-48dc-bb20-823e836a8c44
+    jinja: 'What are the diseases mentioned in the following text?
+
+      {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+      |||
+
+      {% set diseases = {"list": [], "disease_started": False} %}
+
+      {% set disease_token = ""  %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+      {% set disease_token = tokens[loop.index - 1]  %}
+
+      {{ diseases.list.append(" ") |default("", True)}}
+
+      {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+      == 1 else disease_token) |default("", True)}}
+
+      {% elif diseases.disease_started %}
+
+      {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+      {{ diseases.list.append(",") |default("", True)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "No
+      diseases found!"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: simple_question_asking_response_as_text
+    reference: Simple question asking for the diseases present in a text; the response
+      is plain text
+  04960166-100e-4615-a644-b62283336636: !Template
+    answer_choices: null
+    id: 04960166-100e-4615-a644-b62283336636
+    jinja: 'What are the token indexes corresponding to diseases mentioned in the
+      following text?
+
+      {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+      |||
+
+      {% set vars = {''no_disease'': True} %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ vars.update({''no_disease'': False}) | default("", True) }}
+
+      - {{(loop.index - 1)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+
+      {% if vars.no_disease %}
+
+      There are no disease tokens in the text.
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: simple_question_asking_token_indexes_list
+    reference: A simple question asking which token indexes refer to a disease
+  5a693a8f-07a2-4d88-ab3a-337b1029d9a2: !Template
+    answer_choices: null
+    id: 5a693a8f-07a2-4d88-ab3a-337b1029d9a2
+    jinja: 'Are there diseases mentioned in the following text?
+
+      {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+      |||
+
+      {% set vars = {''no_disease'': True} %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ vars.update({''no_disease'': False}) | default("", True) }}
+
+      {% endif %}
+
+      {% endfor %}
+
+
+      {{"No there aren''t!" if vars.no_disease else "Yes there are!"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question_asking_diseases_presence
+    reference: Ask for disease presence in the text
+  f4cad387-e558-4087-854f-0991f7aafca8: !Template
+    answer_choices: null
+    id: f4cad387-e558-4087-854f-0991f7aafca8
+    jinja: '{{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+      What are the diseases mentioned in the previous text?
+
+      |||
+
+      {% set diseases = {"list": [], "disease_started": False} %}
+
+      {% set disease_token = ""  %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+      {% set disease_token = tokens[loop.index - 1]  %}
+
+      {{ diseases.list.append(" ") |default("", True)}}
+
+      {{ diseases.list.append((disease_token[0]|upper) + disease_token[1:] if ner_tag
+      == 1 else disease_token) |default("", True)}}
+
+      {% elif diseases.disease_started %}
+
+      {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+      {{ diseases.list.append(",") |default("", True)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{diseases.list | join | trim(",") if (diseases.list | length) > 0 else "No
+      diseases found!"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: simple_question_asking_response_as_text_after_text
+    reference: Simple question asking for the response as text, posing the question
+      after the text
+  f8e9cf13-7914-4257-abda-f9e98d7ec7f9: !Template
+    answer_choices: null
+    id: f8e9cf13-7914-4257-abda-f9e98d7ec7f9
+    jinja: 'What are the diseases mentioned in the following text?
+
+      {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+      |||
+
+      {% set vars = {''no_disease'': True} %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ vars.update({''no_disease'': False}) | default("", True) }}
+
+      - {{tokens[loop.index - 1] }}
+
+      {% endif %}
+
+      {% endfor %}
+
+
+      {% if vars.no_disease %}
+
+      There are no diseases in the text.
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: simple_question_asking_word_list
+    reference: A question for listing the words that compose a disease entity
+  f91d18a6-6581-4379-8b46-06a9ef44b401: !Template
+    answer_choices: null
+    id: f91d18a6-6581-4379-8b46-06a9ef44b401
+    jinja: '{{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" -", "-") }}
+
+      Are there diseases mentioned in the previous text?
+
+      |||
+
+      {% set vars = {''no_disease'': True} %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ vars.update({''no_disease'': False}) | default("", True) }}
+
+      {% endif %}
+
+      {% endfor %}
+
+
+      {{"No there aren''t!" if vars.no_disease else "Yes there are!"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question_asking_diseases_presence_after_text
+    reference: Asking for disease presence after the text
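
The ncbi_disease templates above reconstruct disease mentions from BIO tags (0 = outside, 1 = begin-disease, 2 = inside-disease) using Jinja dict-mutation tricks like `diseases.update(...)` and `diseases.list.append(...)`. The same aggregation, capitalizing the first token of each mention as the templates do, reads more clearly in plain Python:

def extract_diseases(tokens, ner_tags):
    diseases, current = [], []
    for token, tag in zip(tokens, ner_tags):
        if tag == 1:                    # B: start a new mention, capitalized
            if current:
                diseases.append(" ".join(current))
            current = [token[0].upper() + token[1:]]
        elif tag == 2 and current:      # I: continue the current mention
            current.append(token)
        elif tag == 0 and current:      # O: close any open mention
            diseases.append(" ".join(current))
            current = []
    if current:
        diseases.append(" ".join(current))
    return diseases

print(extract_diseases(["a", "colorectal", "cancer", "case"], [0, 1, 2, 0]))
# ['Colorectal cancer']
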
diff --git a/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml b/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..56f57ea237d2aa169cdb3d47992c82cfdd7b0c17
--- /dev/null
+++ b/promptsource/templates/neural_code_search/evaluation_dataset/templates.yaml
@@ -0,0 +1,35 @@
+dataset: neural_code_search
+subset: evaluation_dataset
+templates:
+  a488e7e3-96bb-4816-a939-f92d87b0e39f: !Template
+    answer_choices: null
+    id: a488e7e3-96bb-4816-a939-f92d87b0e39f
+    jinja: "```\n{{answer}}\n```\n\nGiven that the previous code answers a question,\
+      \ what could have been the question?\n\n||| \n\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: neural_code_search2
+    reference: ''
+  c8902bf6-7a3f-4698-b9a9-96d7aa4d478b: !Template
+    answer_choices: null
+    id: c8902bf6-7a3f-4698-b9a9-96d7aa4d478b
+    jinja: "{{question}} \n\nCode this on Android.\n\n|||\n\n```\n{{answer}}\n```"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: neural_code_search1
+    reference: ''
+  fc3d224d-873c-4ab7-9fc0-44337348248e: !Template
+    answer_choices: null
+    id: fc3d224d-873c-4ab7-9fc0-44337348248e
+    jinja: "{{question}}\n\nWhat would be the best way to do this in Android?\n\n\
+      ||| \n\n{{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: neural_code_search3
+    reference: ''
diff --git a/promptsource/templates/newspop/templates.yaml b/promptsource/templates/newspop/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c88958cdcde5bf786ff8fec920afc10d208ad228
--- /dev/null
+++ b/promptsource/templates/newspop/templates.yaml
@@ -0,0 +1,89 @@
+dataset: newspop
+templates:
+  13fec306-ae32-4fc0-aa5c-d1fb1e717a5e: !Template
+    answer_choices: null
+    id: 13fec306-ae32-4fc0-aa5c-d1fb1e717a5e
+    jinja: "{{title}}\n\nThis title is about {{\"economy\"}}, or {{\"Microsoft\"}},\
+      \  or {{\"Obama\"}}, or  {{\"Palestine\"}}.\n\nAnswer: \n\n|||\n\n{{topic}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_3
+    reference: ''
+  71d4d30d-7340-4ad4-bbfe-d587361c3ad8: !Template
+    answer_choices: null
+    id: 71d4d30d-7340-4ad4-bbfe-d587361c3ad8
+    jinja: "{{headline}}\n\nThe article is about {{\"economy\"}}, {{\"Microsoft\"\
+      }}, {{\"Obama\"}}, or {{\"Palestine\"}}.\n\nTopic: \n\n|||\n\n{{topic}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_2
+    reference: ''
+  87986179-afef-4b2a-8ef0-4723a269ce47: !Template
+    answer_choices: null
+    id: 87986179-afef-4b2a-8ef0-4723a269ce47
+    jinja: "{{title}}\n\n: \n\n{{headline}} \n\nThis article is about one of the following\
+      \ topics {{\"economy\"}}, {{\"Microsoft\"}}, {{\"Obama\"}}, {{\"Palestine\"\
+      }}. Which is it?\n\nAnswer:\n\n||| \n\n{{topic}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_5
+    reference: ''
+  87fc2347-6d83-4624-9620-6890e287a120: !Template
+    answer_choices: null
+    id: 87fc2347-6d83-4624-9620-6890e287a120
+    jinja: "Title: {{title}} \n\nand \n\nHeadline: {{headline}}\n\nis news about {{\"\
+      economy\"}}, or {{\"Microsoft\"}}, or {{\"Obama\"}}, or {{\"Palestine\"}}\n\n\
+      Topic: \n\n|||\n\n{{topic}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_1
+    reference: ''
+  bfca2096-ee45-4ee3-acf0-e3a9c0acdc7c: !Template
+    answer_choices: null
+    id: bfca2096-ee45-4ee3-acf0-e3a9c0acdc7c
+    jinja: 'I read a news article titled:
+
+
+      {{title}}
+
+
+      The main article was: {{headline}}
+
+
+      And it was about one of the following: {{"economy"}}, {{"Microsoft"}}, {{"Obama"}},
+      or {{"Palestine"}}:
+
+
+      Answer:
+
+
+      |||
+
+
+      {{topic}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_4
+    reference: ''
+  e4cadaf7-5330-418c-bf8e-4897a39467f5: !Template
+    answer_choices: null
+    id: e4cadaf7-5330-418c-bf8e-4897a39467f5
+    jinja: "For the article, I am writing about {{topic}}:\n\nthe headline is related\
+      \ to {{\"economy\"}}, {{\"Microsoft\"}}, {{\"Obama\"}},  or {{\"Palestine\"\
+      }}::\n\n {{headline}}\n\nthe title is:\n\n|||\n\n{{title}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: template_6
+    reference: ''
diff --git a/promptsource/templates/nlu_evaluation_data/templates.yaml b/promptsource/templates/nlu_evaluation_data/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c945de67189a5b068e43a1518def9fed34b5b6e9
--- /dev/null
+++ b/promptsource/templates/nlu_evaluation_data/templates.yaml
@@ -0,0 +1,277 @@
+dataset: nlu_evaluation_data
+templates:
+  352e2261-149c-4363-a1e6-8fa4d5c93481: !Template
+    answer_choices: null
+    id: 352e2261-149c-4363-a1e6-8fa4d5c93481
+    jinja: "What service could be started when a user issues the following query ?\n\
+      {{text}}\n|||\n{{\n[\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\"\
+      ,\n  \"audio_volume_down\",\n  \"audio_volume_mute\",\n  \"audio_volume_other\"\
+      ,\n  \"audio_volume_up\",\n  \"calendar_query\",\n  \"calendar_remove\",\n \
+      \ \"calendar_set\",\n  \"cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\"\
+      ,\n  \"datetime_query\",\n  \"email_addcontact\",\n  \"email_query\",\n  \"\
+      email_querycontact\",\n  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\"\
+      ,\n  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n\
+      \  \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].split('_')[0]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_1
+    reference: ''
+  3ab6dff3-7056-4a0a-819b-b1ec267381c9: !Template
+    answer_choices: null
+    id: 3ab6dff3-7056-4a0a-819b-b1ec267381c9
+    jinja: "The virtual assistant received this query: {{text}}.\nWhat service should\
+      \ it activate among these choices: {{ ['music', 'news', 'alarm', 'social', 'play',\
+      \ 'qa', 'recommendation', 'lists', 'datetime', 'email', 'iot', 'general', 'weather',\
+      \ 'calendar', 'transport', 'audio', 'cooking', 'takeaway'] | join(', ') }}\n\
+      |||\n{{\n[\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\",\n  \"\
+      audio_volume_down\",\n  \"audio_volume_mute\",\n  \"audio_volume_other\",\n\
+      \  \"audio_volume_up\",\n  \"calendar_query\",\n  \"calendar_remove\",\n  \"\
+      calendar_set\",\n  \"cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\"\
+      ,\n  \"datetime_query\",\n  \"email_addcontact\",\n  \"email_query\",\n  \"\
+      email_querycontact\",\n  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\"\
+      ,\n  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n\
+      \  \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].split('_')[0]}} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_9
+    reference: ''
+  5346ace7-c124-4931-8180-9c4c6f55cec3: !Template
+    answer_choices: null
+    id: 5346ace7-c124-4931-8180-9c4c6f55cec3
+    jinja: "What service does the user intend to start when saying the following text\
+      \ ? \n{{text}}\n|||\n{{\n[\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\"\
+      ,\n  \"audio_volume_down\",\n  \"audio_volume_mute\",\n  \"audio_volume_other\"\
+      ,\n  \"audio_volume_up\",\n  \"calendar_query\",\n  \"calendar_remove\",\n \
+      \ \"calendar_set\",\n  \"cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\"\
+      ,\n  \"datetime_query\",\n  \"email_addcontact\",\n  \"email_query\",\n  \"\
+      email_querycontact\",\n  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\"\
+      ,\n  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n\
+      \  \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].split('_')[0]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_4
+    reference: ''
+  60fba53b-c5b0-464f-ae75-ce1aeea72dad: !Template
+    answer_choices: null
+    id: 60fba53b-c5b0-464f-ae75-ce1aeea72dad
+    jinja: "If the user intend to start this service, what would be a query for it\
+      \ ? \n{{scenario}}\n|||\n{{text}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_8
+    reference: ''
+  83b688de-bacd-4643-87a5-9a5a64ddbc46: !Template
+    answer_choices: null
+    id: 83b688de-bacd-4643-87a5-9a5a64ddbc46
+    jinja: "Which service from the following choices {{ ['music', 'news', 'alarm',\
+      \ 'social', 'play', 'qa', 'recommendation', 'lists', 'datetime', 'email', 'iot',\
+      \ 'general', 'weather', 'calendar', 'transport', 'audio', 'cooking', 'takeaway']\
+      \ | join(', ') }} best describes this piece of text?\n{{text}}\n|||\n{{\n\
+      [\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\",\n  \"audio_volume_down\"\
+      ,\n  \"audio_volume_mute\",\n  \"audio_volume_other\",\n  \"audio_volume_up\"\
+      ,\n  \"calendar_query\",\n  \"calendar_remove\",\n  \"calendar_set\",\n  \"\
+      cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\",\n  \"datetime_query\"\
+      ,\n  \"email_addcontact\",\n  \"email_query\",\n  \"email_querycontact\",\n\
+      \  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\",\n\
+      \  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n \
+      \ \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].split('_')[0]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_3
+    reference: ''
+  ada5fde5-3f31-404e-bd9c-48e380097cdc: !Template
+    answer_choices: null
+    id: ada5fde5-3f31-404e-bd9c-48e380097cdc
+    jinja: "What service from the following choices, {{ ['music', 'news', 'alarm',\
+      \ 'social', 'play', 'qa', 'recommendation', 'lists', 'datetime', 'email', 'iot',\
+      \ 'general', 'weather', 'calendar', 'transport', 'audio', 'cooking', 'takeaway']\
+      \ | join(', ') }} could start as a result of the query: {{text}}?\n|||\n\
+      {{\n[\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\",\n  \"audio_volume_down\"\
+      ,\n  \"audio_volume_mute\",\n  \"audio_volume_other\",\n  \"audio_volume_up\"\
+      ,\n  \"calendar_query\",\n  \"calendar_remove\",\n  \"calendar_set\",\n  \"\
+      cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\",\n  \"datetime_query\"\
+      ,\n  \"email_addcontact\",\n  \"email_query\",\n  \"email_querycontact\",\n\
+      \  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\",\n\
+      \  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n \
+      \ \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].split('_')[0]}} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_10
+    reference: ''
+  bba54daf-a2ed-4bcb-ad63-8ad900fae3a7: !Template
+    answer_choices: null
+    id: bba54daf-a2ed-4bcb-ad63-8ad900fae3a7
+    jinja: "What would be a typical query for activating the following service ? \n\
+      {{scenario}}\n|||\n{{text}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_5
+    reference: ''
+  c1a4b9b6-09fa-41ca-820c-45c088a429b5: !Template
+    answer_choices: null
+    id: c1a4b9b6-09fa-41ca-820c-45c088a429b5
+    jinja: "Classify this text into one of the following choices:  {{ ['music', 'news',\
+      \ 'alarm', 'social', 'play', 'qa', 'recommendation', 'lists', 'datetime', 'email',\
+      \ 'iot', 'general', 'weather', 'calendar', 'transport', 'audio', 'cooking',\
+      \ 'takeaway'] | join(', ') }} \n{{text}}\n|||\n{{\n[\n  \"alarm_query\",\n \
+      \ \"alarm_remove\",\n  \"alarm_set\",\n  \"audio_volume_down\",\n  \"audio_volume_mute\"\
+      ,\n  \"audio_volume_other\",\n  \"audio_volume_up\",\n  \"calendar_query\",\n\
+      \  \"calendar_remove\",\n  \"calendar_set\",\n  \"cooking_query\",\n  \"cooking_recipe\"\
+      ,\n  \"datetime_convert\",\n  \"datetime_query\",\n  \"email_addcontact\",\n\
+      \  \"email_query\",\n  \"email_querycontact\",\n  \"email_sendemail\",\n  \"\
+      general_affirm\",\n  \"general_commandstop\",\n  \"general_confirm\",\n  \"\
+      general_dontcare\",\n  \"general_explain\",\n  \"general_greet\",\n  \"general_joke\"\
+      ,\n  \"general_negate\",\n  \"general_praise\",\n  \"general_quirky\",\n  \"\
+      general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\",\n  \"iot_hue_lightchange\"\
+      ,\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\",\n  \"iot_hue_lighton\",\n\
+      \  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"iot_wemo_on\",\n  \"lists_createoradd\"\
+      ,\n  \"lists_query\",\n  \"lists_remove\",\n  \"music_dislikeness\",\n  \"music_likeness\"\
+      ,\n  \"music_query\",\n  \"music_settings\",\n  \"news_query\",\n  \"play_audiobook\"\
+      ,\n  \"play_game\",\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\"\
+      ,\n  \"qa_currency\",\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\"\
+      ,\n  \"qa_stock\",\n  \"recommendation_events\",\n  \"recommendation_locations\"\
+      ,\n  \"recommendation_movies\",\n  \"social_post\",\n  \"social_query\",\n \
+      \ \"takeaway_order\",\n  \"takeaway_query\",\n  \"transport_query\",\n  \"transport_taxi\"\
+      ,\n  \"transport_ticket\",\n  \"transport_traffic\",\n  \"weather_query\"\n\
+      ][label].split('_')[0]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_7
+    reference: ''
+  ce432931-1040-41ab-a2af-04db800d00e3: !Template
+    answer_choices: null
+    id: ce432931-1040-41ab-a2af-04db800d00e3
+    jinja: "What is the user intent, when saying the following text ? \n{{text}}\n\
+      |||\n{{\n[\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\",\n  \"\
+      audio_volume_down\",\n  \"audio_volume_mute\",\n  \"audio_volume_other\",\n\
+      \  \"audio_volume_up\",\n  \"calendar_query\",\n  \"calendar_remove\",\n  \"\
+      calendar_set\",\n  \"cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\"\
+      ,\n  \"datetime_query\",\n  \"email_addcontact\",\n  \"email_query\",\n  \"\
+      email_querycontact\",\n  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\"\
+      ,\n  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n\
+      \  \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].replace('_', '  ')\n\
+      }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_2
+    reference: ''
+  ce869e8f-7975-4d5c-bd91-a23624c17285: !Template
+    answer_choices: null
+    id: ce869e8f-7975-4d5c-bd91-a23624c17285
+    jinja: "What service does the following piece of text talk about?\n{{text}}\n\
+      |||\n{{\n[\n  \"alarm_query\",\n  \"alarm_remove\",\n  \"alarm_set\",\n  \"\
+      audio_volume_down\",\n  \"audio_volume_mute\",\n  \"audio_volume_other\",\n\
+      \  \"audio_volume_up\",\n  \"calendar_query\",\n  \"calendar_remove\",\n  \"\
+      calendar_set\",\n  \"cooking_query\",\n  \"cooking_recipe\",\n  \"datetime_convert\"\
+      ,\n  \"datetime_query\",\n  \"email_addcontact\",\n  \"email_query\",\n  \"\
+      email_querycontact\",\n  \"email_sendemail\",\n  \"general_affirm\",\n  \"general_commandstop\"\
+      ,\n  \"general_confirm\",\n  \"general_dontcare\",\n  \"general_explain\",\n\
+      \  \"general_greet\",\n  \"general_joke\",\n  \"general_negate\",\n  \"general_praise\"\
+      ,\n  \"general_quirky\",\n  \"general_repeat\",\n  \"iot_cleaning\",\n  \"iot_coffee\"\
+      ,\n  \"iot_hue_lightchange\",\n  \"iot_hue_lightdim\",\n  \"iot_hue_lightoff\"\
+      ,\n  \"iot_hue_lighton\",\n  \"iot_hue_lightup\",\n  \"iot_wemo_off\",\n  \"\
+      iot_wemo_on\",\n  \"lists_createoradd\",\n  \"lists_query\",\n  \"lists_remove\"\
+      ,\n  \"music_dislikeness\",\n  \"music_likeness\",\n  \"music_query\",\n  \"\
+      music_settings\",\n  \"news_query\",\n  \"play_audiobook\",\n  \"play_game\"\
+      ,\n  \"play_music\",\n  \"play_podcasts\",\n  \"play_radio\",\n  \"qa_currency\"\
+      ,\n  \"qa_definition\",\n  \"qa_factoid\",\n  \"qa_maths\",\n  \"qa_stock\"\
+      ,\n  \"recommendation_events\",\n  \"recommendation_locations\",\n  \"recommendation_movies\"\
+      ,\n  \"social_post\",\n  \"social_query\",\n  \"takeaway_order\",\n  \"takeaway_query\"\
+      ,\n  \"transport_query\",\n  \"transport_taxi\",\n  \"transport_ticket\",\n\
+      \  \"transport_traffic\",\n  \"weather_query\"\n][label].split('_')[0]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: nlu_eval_data_6
+    reference: ''
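
The nlu_evaluation_data prompts above all lean on the same trick: the dataset's integer `label` indexes an alphabetically ordered list of its 68 intent names, and `split('_')[0]` keeps only the scenario prefix (the nlu_eval_data_2 variant keeps the full intent via `replace`). A minimal Python sketch of that mapping, with the intent list truncated for brevity:

    # Sketch of the label -> scenario mapping used by the templates above.
    # INTENTS is truncated; the templates enumerate all 68 names in order.
    INTENTS = ["alarm_query", "alarm_remove", "alarm_set", "audio_volume_down"]

    def scenario_for(label: int) -> str:
        # Mirrors the Jinja expression `[ ... ][label].split('_')[0]`.
        return INTENTS[label].split("_")[0]

    assert scenario_for(2) == "alarm"
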
diff --git a/promptsource/templates/nq_open/templates.yaml b/promptsource/templates/nq_open/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2caf9bfef03b92f7ea760984ec1a3e0bcbda07df
--- /dev/null
+++ b/promptsource/templates/nq_open/templates.yaml
@@ -0,0 +1,109 @@
+dataset: nq_open
+templates:
+  05b8ac63-5aa1-4ce7-8257-ade0fca889ae: !Template
+    answer_choices: null
+    id: 05b8ac63-5aa1-4ce7-8257-ade0fca889ae
+    jinja: 'The goal is to predict an English answer string for an input English question.
+      All questions can be answered using the contents of English Wikipedia.
+
+      Question: {{question}}
+
+      Answer:
+
+      |||
+
+      {{answer|choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: formal_description
+    reference: Copied from the dataset description.
+  0b23fe26-c659-4a84-834f-f19622d11412: !Template
+    answer_choices: null
+    id: 0b23fe26-c659-4a84-834f-f19622d11412
+    jinja: 'Question: {{question}}
+
+      Answer:
+
+      |||
+
+
+      {{answer | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question_answer
+    reference: Plain Question
+  35113036-4cb4-4db5-a92e-d208e1b48b7c: !Template
+    answer_choices: null
+    id: 35113036-4cb4-4db5-a92e-d208e1b48b7c
+    jinja: 'Guess a question that has the answer "{{answer|choice}}"
+
+      |||
+
+      {{question}}?'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: guess_question
+    reference: Guess a question. This shows whether the model can identify the entity in the question.
+  5762f138-a3bf-4614-8dff-dcae7b5bd4a4: !Template
+    answer_choices: null
+    id: 5762f138-a3bf-4614-8dff-dcae7b5bd4a4
+    jinja: 'I''ve always wondered: {{question}}
+
+      |||
+
+      {{answer|choice}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: first_person_context
+    reference: Ask a question in first person
+  cd157288-0211-46a8-a00c-ba0e07980e37: !Template
+    answer_choices: null
+    id: cd157288-0211-46a8-a00c-ba0e07980e37
+    jinja: 'Search query: {{question}}
+
+      Response:
+
+      |||
+
+      {{answer|choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: search_query
+    reference: ''
+  cf937d15-48e0-4ae3-a4eb-9098cccc58ce: !Template
+    answer_choices: null
+    id: cf937d15-48e0-4ae3-a4eb-9098cccc58ce
+    jinja: 'Answer the following question.
+
+      {{question}}
+
+      |||
+
+      {{answer|choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question_with_instruction
+    reference: Instruction before question.
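
In nq_open the `answer` field is a list of acceptable answer strings, so every target above pipes it through the `choice` filter. `choice` is a custom filter promptsource registers on its Jinja environment; the sketch below assumes it behaves like `random.choice` over the gold answers:

    import random
    from jinja2 import Environment

    # Stand-in for promptsource's `choice` filter (assumed random.choice).
    env = Environment()
    env.filters["choice"] = random.choice

    t = env.from_string("Question: {{question}}\nAnswer: ||| {{answer | choice}}")
    out = t.render(question="who wrote the play hamlet",
                   answer=["William Shakespeare", "Shakespeare"])
    prompt, target = (s.strip() for s in out.split("|||"))
    assert target in {"William Shakespeare", "Shakespeare"}
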
diff --git a/promptsource/templates/numer_sense/templates.yaml b/promptsource/templates/numer_sense/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45d8a7e602368d2e5c061d4c57d6f474df272ff2
--- /dev/null
+++ b/promptsource/templates/numer_sense/templates.yaml
@@ -0,0 +1,111 @@
+dataset: numer_sense
+templates:
+  15ca5a34-71ed-48c0-b0ad-afc40e698a67: !Template
+    answer_choices: null
+    id: 15ca5a34-71ed-48c0-b0ad-afc40e698a67
+    jinja: "{{sentence | replace(\"<mask>\", \"__________\")}}\n\nThe above sentence\
+      \ can be filled with a number word. True or False?\n\n||| \n\n{% if target ==\
+      \ \"no\" %} \nFalse\n{% elif target != \"no\" %}\nTrue\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fill_in_the_blank_v6
+    reference: ''
+  1f959d92-dca8-4647-9840-69391dfbd000: !Template
+    answer_choices: null
+    id: 1f959d92-dca8-4647-9840-69391dfbd000
+    jinja: "Fill in the blank in the following sentence using world knowledge:\n\n\
+      {{sentence | replace(\"<mask>\", \"__________\")}}\n\nChoose from the following\
+      \ options:\n\n{{\"nine\"}}, {{\"three\"}}, {{\"four\"}}, {{\"zero\"}}, {{\"\
+      two\"}}, {{\"six\"}}, {{\"eight\"}}, {{\"one\"}}, {{\"five\"}}, {{\"ten\"}},\
+      \ {{\"no\"}}, {{\"seven\"}}\n\n||| \n\n{{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: fill_in_the_blank_v5
+    reference: ''
+  4e9da2b8-2502-44a7-a7da-ae62f2d554c9: !Template
+    answer_choices: null
+    id: 4e9da2b8-2502-44a7-a7da-ae62f2d554c9
+    jinja: 'The following sentence needs to be filled in with a word which is either
+      a number word or "no". Using common sense and world knowledge, fill in the blank.
+
+
+      {{sentence | replace("<mask>", "__________")}}
+
+
+      Which is it?
+
+
+      |||
+
+
+      {{target}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: fill_in_the_blank_v2
+    reference: ''
+  5d8e8d21-8059-4373-bbf2-a25cbe1e6960: !Template
+    answer_choices: null
+    id: 5d8e8d21-8059-4373-bbf2-a25cbe1e6960
+    jinja: 'Using common sense reasoning about the world and only the following options,
+      how would you fill in the blank?
+
+
+      {{"nine"}}, {{"three"}}, {{"four"}}, {{"zero"}}, {{"two"}}, {{"six"}}, {{"eight"}},
+      {{"one"}}, {{"five"}}, {{"ten"}}, {{"no"}}, {{"seven"}}
+
+
+      {{sentence | replace("<mask>", "__________")}}
+
+
+      |||
+
+
+      {{target}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: fill_in_the_blank_v3
+    reference: with all the given options
+  cacee36c-e2b7-458e-9d51-6fcfd83842b4: !Template
+    answer_choices: null
+    id: cacee36c-e2b7-458e-9d51-6fcfd83842b4
+    jinja: 'Fill in the blank:
+
+
+      {{sentence | replace("<mask>", "__________")}}
+
+
+      The correct answer is:
+
+
+      |||
+
+
+      {{target}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: fill_in_the_blank_v1
+    reference: replace mask with fill in the blank
+  fc76beb7-c258-412f-a623-42fc8d2331b6: !Template
+    answer_choices: null
+    id: fc76beb7-c258-412f-a623-42fc8d2331b6
+    jinja: "{{sentence | replace(\"<mask>\", \"__________\")}}\n\nUsing only the following\
+      \ options, what answer would make the most sense in the blank above?\n\n{{\"\
+      nine\"}}, {{\"three\"}}, {{\"four\"}}, {{\"zero\"}}, {{\"two\"}}, {{\"six\"\
+      }}, {{\"eight\"}}, {{\"one\"}}, {{\"five\"}}, {{\"ten\"}}, {{\"no\"}}, {{\"\
+      seven\"}}\n\n||| \n\n{{target}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: fill_in_the_blank_v8
+    reference: missing word simple
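
numer_sense stores the missing word as a literal `<mask>` token, and every prompt above swaps it for a visible blank with Jinja's built-in `replace` filter. A self-contained sketch with a made-up row (field names match the dataset):

    from jinja2 import Environment

    # Sketch of the <mask> substitution used throughout the templates above.
    env = Environment()
    t = env.from_string('{{sentence | replace("<mask>", "__________")}}\n||| {{target}}')
    out = t.render(sentence="a spider has <mask> legs", target="eight")
    prompt, answer = (s.strip() for s in out.split("|||"))
    assert prompt == "a spider has __________ legs" and answer == "eight"
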
diff --git a/promptsource/templates/onestop_english/templates.yaml b/promptsource/templates/onestop_english/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2755e8a2caef3db4a8fdf17ac44c25ae519437dd
--- /dev/null
+++ b/promptsource/templates/onestop_english/templates.yaml
@@ -0,0 +1,99 @@
+dataset: onestop_english
+templates:
+  2807f792-45a6-4139-8386-7cdc98651e53: !Template
+    answer_choices: Elementary ||| Intermediate ||| Advanced
+    id: 2807f792-45a6-4139-8386-7cdc98651e53
+    jinja: 'For English as a Second Language (ESL) learners, would the text passage
+      below be at {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} level for
+      reading and simplifying?
+
+
+      "{{text}}"
+
+
+      |||
+
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: esl_context
+    reference: ''
+  553f2bbd-269c-4c4f-bc12-3825f155844d: !Template
+    answer_choices: Elementary ||| Intermediate ||| Advanced
+    id: 553f2bbd-269c-4c4f-bc12-3825f155844d
+    jinja: 'Consider the following text passage: {{text}}
+
+
+      How would you rate the difficulty level of the passage above for automatic readability
+      assessment? {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} level?
+
+
+      |||
+
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: ara_context
+    reference: ''
+  de75ccb8-c0ba-4510-abf8-649b42019cd5: !Template
+    answer_choices: Elementary ||| Intermediate ||| Advanced
+    id: de75ccb8-c0ba-4510-abf8-649b42019cd5
+    jinja: 'If the text passage below were presented to someone learning English
+      as their second language, how would they likely rate the difficulty level
+      of the text in terms of reading and simplifying? {{"Elementary"}}, {{"Intermediate"}}
+      or {{"Advanced"}} level?
+
+
+      "{{text}}"
+
+
+
+      |||
+
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: esl_variation
+    reference: ''
+  e1289be8-7e81-4a85-bfb3-225fd31749a7: !Template
+    answer_choices: Elementary ||| Intermediate ||| Advanced
+    id: e1289be8-7e81-4a85-bfb3-225fd31749a7
+    jinja: 'How would you assess the reading difficulty of the text passage below?
+      Choose one of the {{"Elementary"}}, {{"Intermediate"}} or {{"Advanced"}} levels.
+
+
+      "{{text}}"
+
+
+      |||
+
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: assess
+    reference: ''
+  fdc96a76-6415-437b-bf61-ef6d1d1b8645: !Template
+    answer_choices: Elementary ||| Intermediate ||| Advanced
+    id: fdc96a76-6415-437b-bf61-ef6d1d1b8645
+    jinja: "Consider the following text passage: \n\n\"{{text}}\"\n\nHow would you\
+      \ assess the difficulty level for the task of generating a simplified version\
+      \ of the text passage above? Would it be at the {{\"Elementary\"}}, {{\"Intermediate\"\
+      }} or {{\"Advanced\"}} level?\n\n|||\n\n{{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: ats
+    reference: ''
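
onestop_english is the first file in this diff to use a static `answer_choices` field: the string is split on `|||`, and the dataset's integer `label` selects the entry that `{{ answer_choices[label] }}` renders. A sketch of that pairing (assuming, as the syntax suggests, that promptsource splits on `|||` and strips whitespace):

    # Sketch of how `answer_choices` pairs with the integer `label`.
    answer_choices = "Elementary ||| Intermediate ||| Advanced"
    choices = [c.strip() for c in answer_choices.split("|||")]

    label = 1  # made-up example row
    assert choices[label] == "Intermediate"
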
diff --git a/promptsource/templates/openbookqa/additional/templates.yaml b/promptsource/templates/openbookqa/additional/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf7b1c29d580101e0d3c610471d044c7ac815f52
--- /dev/null
+++ b/promptsource/templates/openbookqa/additional/templates.yaml
@@ -0,0 +1,143 @@
+dataset: openbookqa
+subset: additional
+templates:
+  39af6992-b4d0-4b37-8a28-55ac16d38944: !Template
+    answer_choices: null
+    id: 39af6992-b4d0-4b37-8a28-55ac16d38944
+    jinja: '{{question_stem}}
+
+      - {{ choices["text"] | join("\n- ") }}
+
+
+      Which is the correct answer?
+
+      |||
+
+      {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_correct_inverse
+    reference: Giving options before asking question
+  6744fbdf-3bb6-4fd4-8dd5-64748fa7b44b: !Template
+    answer_choices: null
+    id: 6744fbdf-3bb6-4fd4-8dd5-64748fa7b44b
+    jinja: '{{question_stem}}
+
+
+      Choices:
+
+      - {{ choices["text"] | join("\n- ") }}
+
+      |||
+
+      {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choices
+    reference: ''
+  7482300b-30c0-479f-9635-2bb6eec315fd: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 7482300b-30c0-479f-9635-2bb6eec315fd
+    jinja: '{{question_stem}}
+
+      {% for k in range(choices["text"] | length) %}
+
+      {{'' -> ''.join([["A", "B", "C", "D"][k], choices["text"][k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"A, B, C or D"}}?
+
+      |||
+
+      {{answerKey}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_using_id
+    reference: Using the index (A, B, C, D) for the answer
+  87c7b3ed-d3fd-4ff1-bb45-293660998dde: !Template
+    answer_choices: null
+    id: 87c7b3ed-d3fd-4ff1-bb45-293660998dde
+    jinja: '{{question_stem}}
+
+
+      Choose an answer from this list:
+
+      - {{ choices["text"] | join("\n- ") }}
+
+      |||
+
+      {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_an_answer_with_options
+    reference: choose an answer from a list
+  92575e39-b256-413b-9c59-c96479ffd1a3: !Template
+    answer_choices: null
+    id: 92575e39-b256-413b-9c59-c96479ffd1a3
+    jinja: '{{question_stem}}
+
+
+      Which is the correct answer?
+
+      - {{ choices["text"] | join("\n- ") }}
+
+      |||
+
+      {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_correct
+    reference: ''
+  96b9fe26-8d82-444a-9489-7c00512d4a59: !Template
+    answer_choices: null
+    id: 96b9fe26-8d82-444a-9489-7c00512d4a59
+    jinja: '{{question_stem}}
+
+      - {{ choices["text"] | join("\n- ") }}
+
+      |||
+
+      {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: only_options
+    reference: Listing the options right after the question
+  ab352cd9-dd1e-4f9c-a1eb-e7aca7447e3a: !Template
+    answer_choices: null
+    id: ab352cd9-dd1e-4f9c-a1eb-e7aca7447e3a
+    jinja: '{{question_stem}}
+
+
+      Pick the right answer from the list:
+
+      - {{ choices["text"] | join("\n- ") }}
+
+      |||
+
+      {{choices["text"][{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_answer_with_options
+    reference: ''
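
openbookqa stores its gold key as a letter, so the templates above inline a Jinja dict literal to turn `answerKey` into a position in `choices["text"]`. The same lookup in plain Python, with an invented record that follows the dataset schema:

    # Sketch of the letter-to-index lookup in the openbookqa templates above.
    choices = {"text": ["deserts", "oceans", "forests", "tundra"],
               "label": ["A", "B", "C", "D"]}
    answer_key = "C"
    gold = choices["text"][{"A": 0, "B": 1, "C": 2, "D": 3}[answer_key]]
    assert gold == "forests"

An equivalent form, `choices["text"][choices["label"].index(answer_key)]`, would also tolerate questions with fewer than four options.
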
diff --git a/promptsource/templates/openbookqa/main/templates.yaml b/promptsource/templates/openbookqa/main/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3c334d09f87b51ec17fe45897f76c6e809f9db1
--- /dev/null
+++ b/promptsource/templates/openbookqa/main/templates.yaml
@@ -0,0 +1,143 @@
+dataset: openbookqa
+subset: main
+templates:
+  0206de6a-22da-4558-9b75-40c558ba60be: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 0206de6a-22da-4558-9b75-40c558ba60be
+    jinja: '{{question_stem}}
+
+
+      Choose an answer from this list:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_an_answer_with_options
+    reference: choose an answer from a list
+  0dfe6c27-9716-455d-92a8-63ada1eb949b: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 0dfe6c27-9716-455d-92a8-63ada1eb949b
+    jinja: '{{question_stem}}
+
+
+      Which is the correct answer?
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_correct
+    reference: ''
+  90260bf9-caf1-4847-b0a7-c76bc015acbf: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 90260bf9-caf1-4847-b0a7-c76bc015acbf
+    jinja: '{{question_stem}}
+
+      {% for k in range(choices["text"] | length) %}
+
+      {{'' -> ''.join([["A", "B", "C", "D"][k], choices["text"][k]])}}
+
+      {% endfor %}
+
+      Is the right answer {{"A, B, C or D"}}?
+
+      |||
+
+      {{answerKey}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_using_id
+    reference: Using the index (A, B, C, D) for the answer
+  96e5065b-2876-4e4f-a33a-bb94c3505bb6: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 96e5065b-2876-4e4f-a33a-bb94c3505bb6
+    jinja: '{{question_stem}}
+
+
+      Choices:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choices
+    reference: ''
+  a4453d77-4cdd-44e5-9901-358f48631944: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: a4453d77-4cdd-44e5-9901-358f48631944
+    jinja: '{{question_stem}}
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: only_options
+    reference: Listing the options right after the question
+  c4814b92-9887-4b08-a4e2-1c7ca44345f7: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: c4814b92-9887-4b08-a4e2-1c7ca44345f7
+    jinja: '{{question_stem}}
+
+      - {{ answer_choices | join("\n- ") }}
+
+
+      Which is the correct answer?
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_correct_inverse
+    reference: Giving options before asking question
+  e9ca981e-0bda-4332-a101-41d5947df8f3: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: e9ca981e-0bda-4332-a101-41d5947df8f3
+    jinja: '{{question_stem}}
+
+
+      Pick the right answer from the list:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answerKey]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_answer_with_options
+    reference: ''
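
The `main` subset differs from `additional` only in hoisting the option list into `answer_choices`, which is here itself a Jinja expression rendered against the example before being split on `|||`. A sketch of that two-step render (assumed promptsource behavior; the record is made up):

    from jinja2 import Environment

    # Render the answer_choices expression first, then split on "|||".
    env = Environment()
    expr = env.from_string('{{choices.text | join("|||")}}')
    example = {"choices": {"text": ["deserts", "oceans", "forests", "tundra"]}}
    answer_choices = [c.strip() for c in expr.render(**example).split("|||")]
    assert answer_choices[2] == "forests"
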
diff --git a/promptsource/templates/paws-x/en/templates.yaml b/promptsource/templates/paws-x/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..102c1a4d15f2d72516e4bdc07082bdcbf9cd159d
--- /dev/null
+++ b/promptsource/templates/paws-x/en/templates.yaml
@@ -0,0 +1,147 @@
+dataset: paws-x
+subset: en
+templates:
+  0be7cecd-b427-4ec9-9b0e-666d6dae00dd: !Template
+    answer_choices: No ||| Yes
+    id: 0be7cecd-b427-4ec9-9b0e-666d6dae00dd
+    jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+      Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: task_description-no-label
+    reference: Generalized prompt format, task_description-input.
+  472fe5eb-b499-4952-a930-f72f4ca9eddd: !Template
+    answer_choices: No ||| Yes
+    id: 472fe5eb-b499-4952-a930-f72f4ca9eddd
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning
+    reference: Natural question
+  4c8d4e4c-eae4-45f6-bdf0-d132ae198ddd: !Template
+    answer_choices: No ||| Yes
+    id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198ddd
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      ||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question-no-label
+    reference: Generalized prompt format, context-question without any label
+  678400f8-1a5c-4a40-b5ef-abeaa41e20dd: !Template
+    answer_choices: No ||| Yes
+    id: 678400f8-1a5c-4a40-b5ef-abeaa41e20dd
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite-no-label
+    reference: Natural Question without label
+  7c205a61-64d4-4673-bb8e-bfa77562eedd: !Template
+    answer_choices: No ||| Yes
+    id: 7c205a61-64d4-4673-bb8e-bfa77562eedd
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      Yes or No.\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question
+    reference: Generalized prompt format, context-question
+  8c259e88-7646-4a50-a4ca-90393920f2dd: !Template
+    answer_choices: No ||| Yes
+    id: 8c259e88-7646-4a50-a4ca-90393920f2dd
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation
+    reference: Concatenation of sentence 1 and sentence 2
+  a3ee450f-0d02-47c3-aa0b-00c3f80539dd: !Template
+    answer_choices: null
+    id: a3ee450f-0d02-47c3-aa0b-00c3f80539dd
+    jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+      {{sentence2}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: paraphrase-task
+    reference: Create a generative paraphrase task
+  a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80dd: !Template
+    answer_choices: No ||| Yes
+    id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80dd
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation-no-label
+    reference: Concatenation of sentence 1 and sentence 2 without any label
+  d5239f5f-2014-47c9-a0c1-4896f76f82dd: !Template
+    answer_choices: No ||| Yes
+    id: d5239f5f-2014-47c9-a0c1-4896f76f82dd
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning-no-label
+    reference: Natural question without label
+  d9911dad-75fe-4506-9843-3a46ba5e49dd: !Template
+    answer_choices: False ||| True
+    id: d9911dad-75fe-4506-9843-3a46ba5e49dd
+    jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3
+    reference: ANLI prompt format from Table G7 in the GPT3 paper
+  dd52359b-dc56-4241-8179-c98c636f03dd: !Template
+    answer_choices: No ||| Yes
+    id: dd52359b-dc56-4241-8179-c98c636f03dd
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite
+    reference: Natural Question
+  f0866713-c59a-4c5d-a307-95e80a935fdd: !Template
+    answer_choices: No ||| Yes
+    id: f0866713-c59a-4c5d-a307-95e80a935fdd
+    jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3-no-label
+    reference: ANLI prompt format from Table G7 in the GPT3 paper, with task
+      information added and no label options given.
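
The paws-x/en templates above (and the three paws subsets that follow, which reuse them verbatim under fresh ids) all follow the same pattern: a rendered prompt split on `|||` into input and target. An end-to-end sketch of one of them, with an invented sentence pair:

    from jinja2 import Environment

    # End-to-end render of the "Concatenation" prompt above.
    env = Environment()
    t = env.from_string(
        "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\n"
        "Question: Does Sentence 1 paraphrase Sentence 2? Yes or No?\n|||\n"
        "{{answer_choices[label]}}"
    )
    example = {"sentence1": "He moved to Paris in 1990.",
               "sentence2": "In 1990 he relocated to Paris.",
               "label": 1, "answer_choices": ["No", "Yes"]}
    prompt, target = (s.strip() for s in t.render(**example).split("|||"))
    assert target == "Yes"
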
diff --git a/promptsource/templates/paws/labeled_final/templates.yaml b/promptsource/templates/paws/labeled_final/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f1fb16b4ea14419709bba8b2eb0cc9335d09e88c
--- /dev/null
+++ b/promptsource/templates/paws/labeled_final/templates.yaml
@@ -0,0 +1,147 @@
+dataset: paws
+subset: labeled_final
+templates:
+  0be7cecd-b427-4ec9-9b0e-666d6dae0063: !Template
+    answer_choices: No ||| Yes
+    id: 0be7cecd-b427-4ec9-9b0e-666d6dae0063
+    jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+      Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: task_description-no-label
+    reference: Generalized prompt format, task_description-input.
+  472fe5eb-b499-4952-a930-f72f4ca9ed43: !Template
+    answer_choices: No ||| Yes
+    id: 472fe5eb-b499-4952-a930-f72f4ca9ed43
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning
+    reference: Natural question
+  4c8d4e4c-eae4-45f6-bdf0-d132ae198d09: !Template
+    answer_choices: No ||| Yes
+    id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198d09
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      ||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question-no-label
+    reference: Generalized prompt format, context-question without any label
+  678400f8-1a5c-4a40-b5ef-abeaa41e20ec: !Template
+    answer_choices: No ||| Yes
+    id: 678400f8-1a5c-4a40-b5ef-abeaa41e20ec
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite-no-label
+    reference: Natural Question without label
+  7c205a61-64d4-4673-bb8e-bfa77562eede: !Template
+    answer_choices: No ||| Yes
+    id: 7c205a61-64d4-4673-bb8e-bfa77562eede
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      Yes or No.\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question
+    reference: Generalized prompt format, context-question
+  8c259e88-7646-4a50-a4ca-90393920f281: !Template
+    answer_choices: No ||| Yes
+    id: 8c259e88-7646-4a50-a4ca-90393920f281
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation
+    reference: Concatenation of sentence 1 and sentence 2
+  a3ee450f-0d02-47c3-aa0b-00c3f80539e9: !Template
+    answer_choices: null
+    id: a3ee450f-0d02-47c3-aa0b-00c3f80539e9
+    jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+      {{sentence2}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: paraphrase-task
+    reference: Create a generative paraphrase task
+  a6d9ec4e-acd4-46cd-9eeb-ae32e0ab8076: !Template
+    answer_choices: No ||| Yes
+    id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab8076
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation-no-label
+    reference: Concatenation of sentence 1 and sentence 2 without any label
+  d5239f5f-2014-47c9-a0c1-4896f76f82a4: !Template
+    answer_choices: No ||| Yes
+    id: d5239f5f-2014-47c9-a0c1-4896f76f82a4
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning-no-label
+    reference: Natural question without label
+  d9911dad-75fe-4506-9843-3a46ba5e49be: !Template
+    answer_choices: False ||| True
+    id: d9911dad-75fe-4506-9843-3a46ba5e49be
+    jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3
+    reference: ANLI prompt format from Table G7 in the GPT3 paper
+  dd52359b-dc56-4241-8179-c98c636f0335: !Template
+    answer_choices: No ||| Yes
+    id: dd52359b-dc56-4241-8179-c98c636f0335
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite
+    reference: Natural Question
+  f0866713-c59a-4c5d-a307-95e80a935f99: !Template
+    answer_choices: No ||| Yes
+    id: f0866713-c59a-4c5d-a307-95e80a935f99
+    jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3-no-label
+    reference: ANLI prompt format from Table G7 in the GPT3 paper, with task
+      information added and no label options given.
diff --git a/promptsource/templates/paws/labeled_swap/templates.yaml b/promptsource/templates/paws/labeled_swap/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f9dd5df058905ce94bb8051fe4b747a6d4adcc7
--- /dev/null
+++ b/promptsource/templates/paws/labeled_swap/templates.yaml
@@ -0,0 +1,147 @@
+dataset: paws
+subset: labeled_swap
+templates:
+  0be7cecd-b427-4ec9-9b0e-666d6dae00aa: !Template
+    answer_choices: No ||| Yes
+    id: 0be7cecd-b427-4ec9-9b0e-666d6dae00aa
+    jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+      Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: task_description-no-label
+    reference: Generalized prompt format, task_description-input.
+  472fe5eb-b499-4952-a930-f72f4ca9edaa: !Template
+    answer_choices: No ||| Yes
+    id: 472fe5eb-b499-4952-a930-f72f4ca9edaa
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning
+    reference: Natural question
+  4c8d4e4c-eae4-45f6-bdf0-d132ae198daa: !Template
+    answer_choices: No ||| Yes
+    id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198daa
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      ||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question-no-label
+    reference: Generalized prompt format, context-question without any label
+  678400f8-1a5c-4a40-b5ef-abeaa41e20aa: !Template
+    answer_choices: No ||| Yes
+    id: 678400f8-1a5c-4a40-b5ef-abeaa41e20aa
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite-no-label
+    reference: Natural Question without label
+  7c205a61-64d4-4673-bb8e-bfa77562eeaa: !Template
+    answer_choices: No ||| Yes
+    id: 7c205a61-64d4-4673-bb8e-bfa77562eeaa
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      Yes or No.\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question
+    reference: Generalized prompt format, context-question
+  8c259e88-7646-4a50-a4ca-90393920f2aa: !Template
+    answer_choices: No ||| Yes
+    id: 8c259e88-7646-4a50-a4ca-90393920f2aa
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation
+    reference: Concatenation of sentence 1 and sentence 2
+  a3ee450f-0d02-47c3-aa0b-00c3f80539aa: !Template
+    answer_choices: null
+    id: a3ee450f-0d02-47c3-aa0b-00c3f80539aa
+    jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+      {{sentence2}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: paraphrase-task
+    reference: Create a generative paraphrase task
+  a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80aa: !Template
+    answer_choices: No ||| Yes
+    id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80aa
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation-no-label
+    reference: Concatenation of sentence 1 and sentence 2 without any label
+  d5239f5f-2014-47c9-a0c1-4896f76f82aa: !Template
+    answer_choices: No ||| Yes
+    id: d5239f5f-2014-47c9-a0c1-4896f76f82aa
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning-no-label
+    reference: Natural question without label
+  d9911dad-75fe-4506-9843-3a46ba5e49aa: !Template
+    answer_choices: False ||| True
+    id: d9911dad-75fe-4506-9843-3a46ba5e49aa
+    jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3
+    reference: ANLI prompt format from Table G7 in the GPT3 paper
+  dd52359b-dc56-4241-8179-c98c636f03aa: !Template
+    answer_choices: No ||| Yes
+    id: dd52359b-dc56-4241-8179-c98c636f03aa
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite
+    reference: Natural Question
+  f0866713-c59a-4c5d-a307-95e80a935faa: !Template
+    answer_choices: No ||| Yes
+    id: f0866713-c59a-4c5d-a307-95e80a935faa
+    jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3-no-label
+    reference: ANLI prompt format from Table G7 in the GPT3 paper, with task
+      information added and no label options given.
diff --git a/promptsource/templates/paws/unlabeled_final/templates.yaml b/promptsource/templates/paws/unlabeled_final/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..53c64530680fedeb1e1be7e433a139da137fa4b2
--- /dev/null
+++ b/promptsource/templates/paws/unlabeled_final/templates.yaml
@@ -0,0 +1,147 @@
+dataset: paws
+subset: unlabeled_final
+templates:
+  0be7cecd-b427-4ec9-9b0e-666d6dae00bb: !Template
+    answer_choices: No ||| Yes
+    id: 0be7cecd-b427-4ec9-9b0e-666d6dae00bb
+    jinja: "Determine if the following two sentences paraphrase each other or not.\n\
+      Sent 1: {{sentence1}}\nSent 2: {{sentence2}}\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: task_description-no-label
+    reference: Generalized prompt format, task_description-input.
+  472fe5eb-b499-4952-a930-f72f4ca9edbb: !Template
+    answer_choices: No ||| Yes
+    id: 472fe5eb-b499-4952-a930-f72f4ca9edbb
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning
+    reference: Natural question
+  4c8d4e4c-eae4-45f6-bdf0-d132ae198dbb: !Template
+    answer_choices: No ||| Yes
+    id: 4c8d4e4c-eae4-45f6-bdf0-d132ae198dbb
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      ||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question-no-label
+    reference: Generalized prompt format, context-question without any label
+  678400f8-1a5c-4a40-b5ef-abeaa41e20bb: !Template
+    answer_choices: No ||| Yes
+    id: 678400f8-1a5c-4a40-b5ef-abeaa41e20bb
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite-no-label
+    reference: Natural Question without label
+  7c205a61-64d4-4673-bb8e-bfa77562eebb: !Template
+    answer_choices: No ||| Yes
+    id: 7c205a61-64d4-4673-bb8e-bfa77562eebb
+    jinja: "{{sentence1}}\nIs that a paraphrase of the following sentence?\n{{sentence2}}?\n\
+      Yes or No.\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context-question
+    reference: Generalized prompt format, context-question
+  8c259e88-7646-4a50-a4ca-90393920f2bb: !Template
+    answer_choices: No ||| Yes
+    id: 8c259e88-7646-4a50-a4ca-90393920f2bb
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation
+    reference: Concatenation of sentence 1 and sentence 2
+  a3ee450f-0d02-47c3-aa0b-00c3f80539bb: !Template
+    answer_choices: null
+    id: a3ee450f-0d02-47c3-aa0b-00c3f80539bb
+    jinja: "{% if label == 1 %} \nParaphrase the sentence: {{sentence1}} \n||| \n\
+      {{sentence2}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: paraphrase-task
+    reference: Create a generative paraphrase task
+  a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80bb: !Template
+    answer_choices: No ||| Yes
+    id: a6d9ec4e-acd4-46cd-9eeb-ae32e0ab80bb
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Does Sentence\
+      \ 1 paraphrase Sentence 2? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Concatenation-no-label
+    reference: Concatenation of sentence 1 and sentence 2 without any label
+  d5239f5f-2014-47c9-a0c1-4896f76f82bb: !Template
+    answer_choices: No ||| Yes
+    id: d5239f5f-2014-47c9-a0c1-4896f76f82bb
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do Sentence\
+      \ 1 and Sentence 2 express the same meaning? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Meaning-no-label
+    reference: Natural question without label
+  d9911dad-75fe-4506-9843-3a46ba5e49bb: !Template
+    answer_choices: False ||| True
+    id: d9911dad-75fe-4506-9843-3a46ba5e49bb
+    jinja: "{{sentence1}} Question: {{sentence2}} True or False? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3
+    reference: ANLI prompt format from Table G7 in the GPT3 paper
+  dd52359b-dc56-4241-8179-c98c636f03bb: !Template
+    answer_choices: No ||| Yes
+    id: dd52359b-dc56-4241-8179-c98c636f03bb
+    jinja: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Can we\
+      \ rewrite Sentence 1 as Sentence 2? Yes or No? \n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Rewrite
+    reference: Natural Question
+  f0866713-c59a-4c5d-a307-95e80a935fbb: !Template
+    answer_choices: No ||| Yes
+    id: f0866713-c59a-4c5d-a307-95e80a935fbb
+    jinja: "{{sentence1}} Question: {{sentence2}} Paraphrase or not?\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: PAWS-ANLI GPT3-no-label
+    reference: ANLI prompt format from Table G7 in the GPT3 paper. Additionally added
+      task information without any label.
diff --git a/promptsource/templates/piqa/templates.yaml b/promptsource/templates/piqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82d6eee3015833156019f29f9bbd59d044698838
--- /dev/null
+++ b/promptsource/templates/piqa/templates.yaml
@@ -0,0 +1,243 @@
+dataset: piqa
+templates:
+  16e97a16-c958-4956-bfba-279f88dafd5b: !Template
+    answer_choices: '{{sol1}} ||| {{sol2}}'
+    id: 16e97a16-c958-4956-bfba-279f88dafd5b
+    jinja: 'Goal: {{goal}}
+
+
+      Which is the correct ending?
+
+      - {{sol1}}
+
+      - {{sol2}}
+
+
+      Answer:
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: what_is_the_correct_ending
+    reference: ''
+  3f336295-c1f7-410a-8fc6-d2ed79487aa4: !Template
+    answer_choices: '{{sol1}} ||| {{sol2}}'
+    id: 3f336295-c1f7-410a-8fc6-d2ed79487aa4
+    jinja: '{{"Solution 1"}}: {{sol1}}
+
+      {{"Solution 2"}}: {{sol2}}
+
+
+      Goal: {{goal}}
+
+
+      Given the goal, what is the correct solution?
+
+
+      Answer by copying the correct solution
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_correct_choice_with_choice_given_before_goal
+    reference: ''
+  44778818-7b73-4262-a294-c00fc32b6c2c: !Template
+    answer_choices: 1 ||| 2
+    id: 44778818-7b73-4262-a294-c00fc32b6c2c
+    jinja: 'Sentence: {{goal}}
+
+
+      Choice {{answer_choices[0]}}: {{sol1}}
+
+
+      Choice {{answer_choices[1]}}: {{sol2}}
+
+
+      What is the index of the correct choice for the ending of the sentence?
+
+
+      Answer:
+
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_correct_choice_index
+    reference: ''
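+  # Note: the "Correct the solution" template below indexes the pair
+  # [sol1, sol2] with `1 - label` to surface the wrong solution in the prompt
+  # and with `label` to emit the correct one as the target; this assumes the
+  # piqa `label` field is always 0 or 1.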
+  5f4b4645-9438-4375-9062-083130e6d04e: !Template
+    answer_choices: null
+    id: 5f4b4645-9438-4375-9062-083130e6d04e
+    jinja: "Given a goal and a wrong solution, rewrite it to give a correct solution.\n\
+      Goal: {{goal}} \nSolution: {{[sol1, sol2][1 - label]}}\nCorrected solution:\n\
+      |||\n{{[sol1, sol2][label]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Correct the solution
+    reference: ''
+  94c39589-7bfb-4c09-9337-672369459545: !Template
+    answer_choices: '{{sol1}} ||| {{sol2}}'
+    id: 94c39589-7bfb-4c09-9337-672369459545
+    jinja: 'Finish the following sentence with the best choice: {{goal}}
+
+
+      Choices:
+
+      - {{sol1}}
+
+      - {{sol2}}
+
+
+      Answer:
+
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: finish_sentence_with_correct_choice
+    reference: ''
+  99565244-4eaf-4004-a28b-4362ba5bcac3: !Template
+    answer_choices: No ||| Yes
+    id: 99565244-4eaf-4004-a28b-4362ba5bcac3
+    jinja: '{{goal}} {{sol2}}
+
+      Does this phrase make sense?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Does this solution make sense? sol2
+    reference: ''
+  adfef248-f856-44fa-affd-e3223718854e: !Template
+    answer_choices: Solution 1 ||| Solution 2
+    id: adfef248-f856-44fa-affd-e3223718854e
+    jinja: 'Given a goal and 2 solutions, choose the most appropriate solution.
+
+      Goal: {{goal}}
+
+      - {{"Solution 1"}}: {{sol1}}
+
+      - {{"Solution 2"}}: {{sol2}}
+
+
+      Answer by returning either {{"Solution 1"}} or {{"Solution 2"}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose the most appropriate solution
+    reference: ''
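+  # Note: the remaining templates splice a solution directly after {{goal}},
+  # so they lowercase only its first character via `sol1[0].lower() + sol1[1:]`
+  # (and likewise for sol2) to keep the concatenated sentence reading naturally.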
+  b5c69473-eedb-4c4f-a5fa-d4e266e43535: !Template
+    answer_choices: null
+    id: b5c69473-eedb-4c4f-a5fa-d4e266e43535
+    jinja: 'Given a sentence, correct it if it doesn''t make sense. If it makes sense,
+      just return it as the answer.
+
+      Input: {{goal}} {{sol2[0].lower() + sol2[1:]}}
+
+      Output:
+
+      |||
+
+      {{goal}} {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: 'Correct the solution if false: from sol 2'
+    reference: ''
+  c8c45ef1-2ffc-43d7-8710-b98c2fc4f699: !Template
+    answer_choices: null
+    id: c8c45ef1-2ffc-43d7-8710-b98c2fc4f699
+    jinja: '{{goal}}
+
+      |||
+
+      {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: no prompt needed
+    reference: ''
+  f044def7-01c2-42de-b6ad-4e8c63ab2bf1: !Template
+    answer_choices: Yes ||| No
+    id: f044def7-01c2-42de-b6ad-4e8c63ab2bf1
+    jinja: 'Does this phrase make sense?
+
+      {{goal}} {{sol1[0].lower() + sol1[1:]}}
+
+      Answer with {{answer_choices[0]}} or {{answer_choices[1]}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Does this solution make sense? sol1
+    reference: ''
+  f42cd457-a14b-465a-a139-d7d2407a3bac: !Template
+    answer_choices: null
+    id: f42cd457-a14b-465a-a139-d7d2407a3bac
+    jinja: 'Sentence: {{goal}} {{sol1[0].lower() + sol1[1:]}}
+
+      If the sentence does not make sense, correct it so that it does make sense.
+      Otherwise, just copy it.
+
+      Answer:
+
+      |||
+
+      {{goal}} {{[sol1[0].lower() + sol1[1:], sol2[0].lower() + sol2[1:]][label]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: 'Correct the solution if false: from sol 1'
+    reference: ''
diff --git a/promptsource/templates/poem_sentiment/templates.yaml b/promptsource/templates/poem_sentiment/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f9e4285fe4fce291ced5281293accf953b63e3b5
--- /dev/null
+++ b/promptsource/templates/poem_sentiment/templates.yaml
@@ -0,0 +1,118 @@
+dataset: poem_sentiment
+templates:
+  211c0765-1f51-4574-b354-040273ea7c38: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: 211c0765-1f51-4574-b354-040273ea7c38
+    jinja: '{{verse_text}} How does the reader feel about this poem? |||  {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_5
+    reference: ''
+  2714baf0-5d19-4781-a60f-f44cd95935f7: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: 2714baf0-5d19-4781-a60f-f44cd95935f7
+    jinja: '{{verse_text}} Is the sentiment the poet expresses in the poem {{"negative"}},
+      {{"positive"}}, {{"neutral"}} or {{"mixed"}}? |||  {{ answer_choices [label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_6
+    reference: ''
+  574ab816-b0bc-4049-a5a5-dcf8f4280dc5: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: 574ab816-b0bc-4049-a5a5-dcf8f4280dc5
+    jinja: The following poem expresses what sentiment? {{verse_text}} ||| {{ answer_choices
+      [label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_3
+    reference: ''
+  7801d04c-4f42-4411-a552-9614c8c3fd53: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: 7801d04c-4f42-4411-a552-9614c8c3fd53
+    jinja: '{{verse_text}} The sentiment expressed in the poem is |||  {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_2
+    reference: ''
+  9fa8eeb4-314b-4850-a28b-0f53bca006d8: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: 9fa8eeb4-314b-4850-a28b-0f53bca006d8
+    jinja: '{{verse_text}} What is the sentiment expressed in this poem? |||  {{ answer_choices
+      [label] }}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_1
+    reference: ''
+  aecb3d13-ff68-4e60-a382-87191940bd5b: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: aecb3d13-ff68-4e60-a382-87191940bd5b
+    jinja: '{{verse_text}} The most appropriate word out of {{"negative"}}, {{"positive"}},
+      {{"neutral"}} and {{"mixed"}}, which express the poet''s sentiment is: |||  {{
+      answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_4
+    reference: ''
+  ca15cecb-4ee6-4445-a0f4-6ef5cd519923: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: ca15cecb-4ee6-4445-a0f4-6ef5cd519923
+    jinja: "{{verse_text}} What sentiement does this poem express? \nOptions: {{\"\
+      negative\"}}, {{\"positive\"}}, {{\"neutral\"}},{{\"mixed\"}}.\nAnswer:  |||\
+      \ {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_10
+    reference: ''
+  e53d2fe8-b83b-484e-81f9-efca32bd7012: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: e53d2fe8-b83b-484e-81f9-efca32bd7012
+    jinja: '{{verse_text}} Out of {{"negative"}}, {{"positive"}}, {{"neutral"}} and
+      {{"mixed"}} sentiments, the poem expresses ||| {{ answer_choices [label] }}
+      sentiments.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_7
+    reference: ''
+  f87a7ba0-11f7-41f9-bee6-94d0ad6e597a: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: f87a7ba0-11f7-41f9-bee6-94d0ad6e597a
+    jinja: Does this poem express a {{"negative"}}, {{"positive"}}, {{"neutral"}}
+      or {{"mixed"}} sentiment? {{verse_text}} ||| {{ answer_choices [label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_9
+    reference: ''
+  fdbebd3d-6517-4be1-8771-489e2de658ef: !Template
+    answer_choices: negative ||| positive ||| neutral ||| mixed
+    id: fdbebd3d-6517-4be1-8771-489e2de658ef
+    jinja: '{{verse_text}} The poet wants the readers to feel ||| {{ answer_choices
+      [label] }} sentiments.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: poem_sentiment_8
+    reference: ''
diff --git a/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml b/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..15d24afceb1c09686086576f7ede60b10c012406
--- /dev/null
+++ b/promptsource/templates/pubmed_qa/pqa_labeled/templates.yaml
@@ -0,0 +1,102 @@
+dataset: pubmed_qa
+subset: pqa_labeled
+templates:
+  00f58886-e04a-4efb-bf41-cfcbd00a5e7d: !Template
+    answer_choices: null
+    id: 00f58886-e04a-4efb-bf41-cfcbd00a5e7d
+    jinja: "\"{{ context.contexts | join(\", \") }}\"\n\nAnswer the following question.\n\
+      \nQ: \"{{question}}\" ||| \n{{long_answer}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Question Answering (Long)
+    reference: Provide a long/verbose answer to the provided question
+  0b630e04-02a8-46d6-b164-a41cd34042ff: !Template
+    answer_choices: null
+    id: 0b630e04-02a8-46d6-b164-a41cd34042ff
+    jinja: '"{{ context.contexts | join(", ") }}"
+
+
+      What is the main question answered by the above research abstract? |||
+
+      {{question}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Generate Question Title
+    reference: Given abstract, generate title (which is in the form of a question)
+  1e0a77f8-0eb4-40a1-814d-8a111df66e5e: !Template
+    answer_choices: null
+    id: 1e0a77f8-0eb4-40a1-814d-8a111df66e5e
+    jinja: "Q: \"{{ question  }}\" \n\nA: \"{{ long_answer }}\"\n\nSummarize the above\
+      \ answer as: YES, NO or MAYBE? |||\n{{final_decision}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Long Answer to Final Decision
+    reference: Given a question, the full text of the relevant answer, summarize a
+      yes/no/maybe answer
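+  # Note: the "Context Section Type" template below draws one abstract section
+  # at random with `range(0, n_sections) | random`, so the rendered prompt and
+  # its target section label can differ between renders of the same example.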
+  21240f74-530a-47b7-a5d9-a6a13083b72e: !Template
+    answer_choices: null
+    id: 21240f74-530a-47b7-a5d9-a6a13083b72e
+    jinja: '{% set n_sections = context.contexts | length %}
+
+      {% set choice = range(0, n_sections) | random %}
+
+
+      "{{ context.contexts[choice] }}"
+
+
+      In a research article, the above text would most likely be found in which section: {{
+      context.labels[:-1] | join(", ") }} or {{ context.labels[-1] }} ? |||
+
+
+      {{ context.labels[choice] }}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Context Section Type
+    reference: Assign text in an abstract to specific paper section headers
+  45cb344c-bb36-492a-ace0-7cfc897e127a: !Template
+    answer_choices: null
+    id: 45cb344c-bb36-492a-ace0-7cfc897e127a
+    jinja: '"{{ context.contexts | join(", ") }}"
+
+
+      What are the MeSH terms for this PubMed abstract? |||
+
+      {{ context.meshes | join(", ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: Medical Subject Headings
+    reference: Predict the set of MeSH terms for a given PubMed abstract
+  91d481e5-fac6-4532-b013-5ac1235b6e1a: !Template
+    answer_choices: null
+    id: 91d481e5-fac6-4532-b013-5ac1235b6e1a
+    jinja: '"{{ context.contexts | join(", ") }}"
+
+
+      Answer the following question as YES, NO or MAYBE
+
+
+      Q: "{{question}}"
+
+
+      A: ||| {{final_decision}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Question Answering (Short)
+    reference: Answer the following question using the provided abstract text
diff --git a/promptsource/templates/qa_srl/templates.yaml b/promptsource/templates/qa_srl/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..76e762009321e3929f2ca6bc800c0088197de0f2
--- /dev/null
+++ b/promptsource/templates/qa_srl/templates.yaml
@@ -0,0 +1,124 @@
+dataset: qa_srl
+templates:
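+# Note: in qa_srl the `question` field appears to be a list of slot tokens in
+# which empty slots are "_", so `question | join(" ") | replace("_ ", "")`
+# below reconstructs a readable question with the unused slots dropped.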
+  3cb09519-032e-4e51-bb97-47aa18ab4367: !Template
+    answer_choices: null
+    id: 3cb09519-032e-4e51-bb97-47aa18ab4367
+    jinja: 'Generate a plausible question that has the following answers based on
+      the context:
+
+
+      Context: {{sentence}}
+
+
+      Answers: {{answers | join(", ")}} |||
+
+      {{question | join(" ") | replace("_ ", "")}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_question
+    reference: ''
+  b614f251-eb01-442f-8743-57d18314a0f8: !Template
+    answer_choices: null
+    id: b614f251-eb01-442f-8743-57d18314a0f8
+    jinja: 'The English teacher deconstructed an example sentence that contained the
+      verb "{{predicate}}": {{sentence}}
+
+
+      {{question | join(" ") | replace("_ ", "")}}|||
+
+      {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: deconstruct_sentence
+    reference: ''
+  bec2ce78-fb31-4529-8b13-240fa6c8bc88: !Template
+    answer_choices: null
+    id: bec2ce78-fb31-4529-8b13-240fa6c8bc88
+    jinja: 'Identify the predicate (the part of a sentence or clause containing a
+      verb and stating something about the subject) in this sentence:
+
+
+      {{sentence}} |||
+
+      {{predicate}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: identify_predicate
+    reference: ''
+  c0192db4-f672-4b36-94b2-b10ca7b6861a: !Template
+    answer_choices: null
+    id: c0192db4-f672-4b36-94b2-b10ca7b6861a
+    jinja: '{{sentence}}
+
+      {{question|join(" ")|replace("_ ", "")}} |||
+
+      {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: sentence_question_concatenation
+    reference: ''
+  dda098f9-74c4-4f9f-8052-20e692c72c92: !Template
+    answer_choices: null
+    id: dda098f9-74c4-4f9f-8052-20e692c72c92
+    jinja: 'Here''s a linguistic problem: you have to correctly identify the part
+      of the sentence that answers the following {{"W"}} question.
+
+      Sentence: {{sentence}}
+
+      Question: {{question | join(" ") | replace("_ ", "")}}|||
+
+      {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: linguistic_problem
+    reference: ''
+  e9ca90f5-f105-4515-b757-262dad590913: !Template
+    answer_choices: null
+    id: e9ca90f5-f105-4515-b757-262dad590913
+    jinja: 'Help me parse the structure of the following sentence constructed around
+      the verb "{{predicate}}": {{sentence}}
+
+
+      {{question | join(" ") | replace("_ ", "")}}|||
+
+      {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: parse_structure
+    reference: ''
+  ec16cb98-8c5b-4219-8b2d-acd8b6236c86: !Template
+    answer_choices: null
+    id: ec16cb98-8c5b-4219-8b2d-acd8b6236c86
+    jinja: '{{sentence}}
+
+
+      The previous sentence contains the verb "{{predicate}}". Answer this question
+      about it: {{question|join(" ")|replace("_ ", "")}}|||
+
+      {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: answer_question
+    reference: ''
diff --git a/promptsource/templates/qa_zre/templates.yaml b/promptsource/templates/qa_zre/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85ff54f5745f62bb8b85c4bfe7617135e538951e
--- /dev/null
+++ b/promptsource/templates/qa_zre/templates.yaml
@@ -0,0 +1,83 @@
+dataset: qa_zre
+templates:
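+# Note: qa_zre questions mask their subject entity as "XXX"; the templates
+# below either ask the model to recover that subject or substitute it back in
+# with `question | replace("XXX", subject)`.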
+  2d6b6ec6-4cba-4a07-a0d1-f6b7cb103281: !Template
+    answer_choices: null
+    id: 2d6b6ec6-4cba-4a07-a0d1-f6b7cb103281
+    jinja: 'Extract the appropriate relation from the following question
+
+
+      {{question}} |||
+
+      {{relation}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: relation
+    reference: https://www.aclweb.org/anthology/K17-1034.pdf
+  5a970b88-53a0-4148-b45e-7ac410df263f: !Template
+    answer_choices: null
+    id: 5a970b88-53a0-4148-b45e-7ac410df263f
+    jinja: "{% if answers|length > 0 %}\nWhat is a possible question that can be generated\
+      \ from the following context and answer(s)?\n\n{{context}} \n\n{{answers|join(\"\
+      , \")}} |||\n{{question|replace(\"XXX\",subject)}} \n\n{% endif %} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: possible_qn_with_answer
+    reference: ''
+  6368de04-070a-4f67-a8bf-fd6d2c07d401: !Template
+    answer_choices: null
+    id: 6368de04-070a-4f67-a8bf-fd6d2c07d401
+    jinja: 'What does "XXX" represent in the following context-question pair?
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{subject}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: subject
+    reference: ''
+  8f76743d-6486-4ae1-8bc8-ae644e3c54aa: !Template
+    answer_choices: null
+    id: 8f76743d-6486-4ae1-8bc8-ae644e3c54aa
+    jinja: 'Extract the appropriate relation from the following question
+
+
+      {{question|replace("XXX",subject)}} |||
+
+      {{relation}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: relation2
+    reference: ''
+  b2195890-a3c5-4e33-be4a-5e53af75e6dd: !Template
+    answer_choices: null
+    id: b2195890-a3c5-4e33-be4a-5e53af75e6dd
+    jinja: '
+
+      {% if answers|length > 0 %}
+
+
+      {{context}}
+
+      {{question.replace("XXX",subject)}} |||
+
+      {{answers|join(", ")}}
+
+
+      {% endif %} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: qa
+    reference: ''
diff --git a/promptsource/templates/qasc/templates.yaml b/promptsource/templates/qasc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ce395f3a8ce1c5b0c5f5f52d746e8400666764a8
--- /dev/null
+++ b/promptsource/templates/qasc/templates.yaml
@@ -0,0 +1,125 @@
+dataset: qasc
+templates:
+  3e1e6ca0-b95e-4e68-bb6a-cd47c8429658: !Template
+    answer_choices: Yes ||| No
+    id: 3e1e6ca0-b95e-4e68-bb6a-cd47c8429658
+    jinja: "If I tell you that {{combinedfact[0]|capitalize}}{{ combinedfact[1:]|trim('.')\
+      \ }}, and ask you the question \"{{ question[0]|lower }}{{ question[1:] }}\"\
+      , is the correct answer \"{{ choices.text[0][0]|lower}}{{ choices.text[0][1:]|trim('.')\
+      \ }}\"? \n\n||| \n\n{% if answerKey == choices.label[0] %} Yes {% else %} No\
+      \ {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: is_correct_1
+    reference: ''
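+  # Note: the multiple-choice templates below recover the gold answer with a
+  # `{% for choice in choices.label %}{% if choice == answerKey %}` scan;
+  # Jinja's `loop.index` is 1-based, hence `answer_choices[loop.index - 1]`.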
+  40ef67db-dff3-4e7b-b167-f7e54b400c74: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 40ef67db-dff3-4e7b-b167-f7e54b400c74
+    jinja: "{{ fact1[0]|capitalize }}{{ fact1[1:]|trim|trim('.') }}, and {{fact2[0]|lower\
+      \ }}{{ fact2[1:]|trim|trim('.') }}. Given these facts, {{ question[0]|lower\
+      \ }}{{question[1:]|trim('?') }} among the following options:\n- {{answer_choices\
+      \ | join(\"\\n - \") }}\n\n||| \n\n{% for choice in choices.label %} {% if choice\
+      \ == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif %}{% endfor %} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_with_separated_facts_1
+    reference: Question Answering with separated facts.
+  5d63c186-e047-49dd-b5fd-c4a574f6f0e2: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 5d63c186-e047-49dd-b5fd-c4a574f6f0e2
+    jinja: "Fact 1: {{ fact1[0]|capitalize }}{{ fact1[1:]|trim|trim('.') }}.\n\nFact\
+      \ 2: {{fact2[0]|capitalize }}{{ fact2[1:]|trim|trim('.') }}.\n\nGiven the two\
+      \ facts above, {{ question[0]|lower }}{{question[1:]|trim('?') }}?\n\n||| \n\
+      \n{% for choice in choices.label %} {% if choice == answerKey %}{{ answer_choices[loop.index\
+      \ - 1] }}{% endif %}{% endfor %}  "
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_with_separated_facts_3
+    reference: Question Answering with separated facts.
+  604dd379-21f1-4d30-af2f-22f9a8a97ceb: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 604dd379-21f1-4d30-af2f-22f9a8a97ceb
+    jinja: "You are presented with the question \"{{ question }}\" and the following\
+      \ answer choices: \n- {{answer_choices | join(\"\\n - \") }}\n\nNow knowing\
+      \ that {{ fact1[0]|lower }}{{ fact1[1:]|trim|trim('.') }} and {{fact2[0]|lower\
+      \ }}{{ fact2[1:]|trim|trim('.') }}, choose the best answer.\n\n||| \n\n{% for\
+      \ choice in choices.label %} {% if choice == answerKey %}{{ answer_choices[loop.index\
+      \ - 1] }}{% endif %}{% endfor %} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_with_separated_facts_4
+    reference: Question Answering with separated facts.
+  67cb1b35-e5d4-490b-beb8-dc9c0be9c298: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 67cb1b35-e5d4-490b-beb8-dc9c0be9c298
+    jinja: "You are presented with the quiz \"{{ question }}\" \n\nBut you don't know\
+      \ the answer, so you turn to your teacher to ask for hints. He says that \"\
+      {{ fact1[0]|lower }}{{ fact1[1:]|trim|trim('.') }}\" and \"{{fact2[0]|lower\
+      \ }}{{ fact2[1:]|trim|trim('.') }}\". \n\nSo, what's the best answer to the\
+      \ question?\n\n||| \n\n{% for choice in choices.label %} {% if choice == answerKey\
+      \ %}{{ answer_choices[loop.index - 1] }}{% endif %}{% endfor %}   "
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_with_separated_facts_5
+    reference: Question Answering with separated facts.
+  b5b61423-8655-408d-a8e6-81a5eaaac2aa: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: b5b61423-8655-408d-a8e6-81a5eaaac2aa
+    jinja: "If {{ combinedfact[0]|lower }}{{ combinedfact[1:]|trim|trim('.') }}, then\
+      \ {{ question[0]|lower }}{{question[1:]|trim|trim('?') }}?\n\nAnswer choices:\n\
+      - {{answer_choices | join(\"\\n - \") }}\n||| \n\n{% for choice in choices.label\
+      \ %} {% if choice == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif\
+      \ %}{% endfor %}  "
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: qa_with_combined_facts_1
+    reference: ''
+  c251edaf-a063-40fe-bd49-038843fcdb23: !Template
+    answer_choices: Yes ||| No
+    id: c251edaf-a063-40fe-bd49-038843fcdb23
+    jinja: "Do you think the right answer to the question \"{{ question[0]|lower }}{{\
+      \ question[1:] }}\" is \"{{ choices.text[1][0]|lower}}{{ choices.text[1][1:]|trim('.')\
+      \ }}\", given that\n {{combinedfact[0]|lower}}{{ combinedfact[1:]|trim('.')\
+      \ }}?\n ||| \n{% if answerKey == choices.label[0] %} Yes {% else %} No {% endif\
+      \ %}   "
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: is_correct_2
+    reference: ''
+  c7cd51f1-ea49-4d6a-a422-46624333b7b1: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: c7cd51f1-ea49-4d6a-a422-46624333b7b1
+    jinja: "Fact 1: {{ fact1[0]|capitalize }}{{ fact1[1:]|trim|trim('.') }}.\n\nFact\
+      \ 2: {{fact2[0]|capitalize }}{{ fact2[1:]|trim|trim('.') }}.\n\nGiven the two\
+      \ facts above, answer the question \"{{ question }}\" with the following options:\
+      \ \n- {{answer_choices | join(\"\\n - \") }}\n\n||| \n\n{% for choice in choices.label\
+      \ %} {% if choice == answerKey %}{{ answer_choices[loop.index - 1] }}{% endif\
+      \ %}{% endfor %} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: qa_with_separated_facts_2
+    reference: Question Answering with separated facts.
diff --git a/promptsource/templates/qed/templates.yaml b/promptsource/templates/qed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aecd19b4259ffb43e6f2671478437160ce96f330
--- /dev/null
+++ b/promptsource/templates/qed/templates.yaml
@@ -0,0 +1,88 @@
+dataset: qed
+templates:
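+# Note: the `is_answer_exist` template in this file indexes `answer_choices`
+# with the boolean `annotation["explaination_type"] != "none"`; as in Python,
+# false selects index 0 ("No") and true index 1 ("Yes"). The key
+# "explaination_type" is kept verbatim, since renaming it would break the lookup.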
+  292db39d-b9e9-4113-b59d-6c5b93133563: !Template
+    answer_choices: null
+    id: 292db39d-b9e9-4113-b59d-6c5b93133563
+    jinja: 'Give a suitable title to the following passage:
+
+
+      {{paragraph_text}} |||
+
+
+      {{title_text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title_prediction
+    reference: ''
+  3578c1ee-8872-406f-be9f-b7e174aed92c: !Template
+    answer_choices: null
+    id: 3578c1ee-8872-406f-be9f-b7e174aed92c
+    jinja: "Question: {{question}} \n\nAnswer: |||\n\n{{original_nq_answers[0][\"\
+      string\"]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: original_nq_answers
+    reference: ''
+  383d06fe-d562-4883-8d29-b727d4c3877b: !Template
+    answer_choices: null
+    id: 383d06fe-d562-4883-8d29-b727d4c3877b
+    jinja: "{% if annotation['selected_sentence']['string']!=\"\" %}\nQuestion: {{question}}\n\
+      \nHint: {{paragraph_text}}  \n\nAnswer: |||\n\n{{annotation['selected_sentence']['string']}}\n\
+      {% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic
+    reference: ''
+  4cc7af61-ee7a-491f-b232-8ef3dc7d1415: !Template
+    answer_choices: No ||| Yes
+    id: 4cc7af61-ee7a-491f-b232-8ef3dc7d1415
+    jinja: '{{paragraph_text}}
+
+
+      Does the above passage contain the answer to the following question:
+
+
+      {{question}} |||
+
+      {{answer_choices[annotation["explaination_type"]!="none"]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: is_answer_exist
+    reference: ''
+  5a1e57c7-39b2-486e-9112-d4311aee6bdc: !Template
+    answer_choices: null
+    id: 5a1e57c7-39b2-486e-9112-d4311aee6bdc
+    jinja: 'I found the following Wikipedia article:
+
+
+      "{{paragraph_text}}"
+
+
+      Can you predict its title? |||
+
+      {{title_text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: topic_prompt
+    reference: ''
+  7d3746b5-52e6-4ce1-b441-007f271f477b: !Template
+    answer_choices: null
+    id: 7d3746b5-52e6-4ce1-b441-007f271f477b
+    jinja: "I need to prepare for my upcoming test. Can you read the below passage\
+      \ and ask me a reasonable question? \n\n{{paragraph_text}} ||| \n\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question_forming
+    reference: ''
diff --git a/promptsource/templates/quac/templates.yaml b/promptsource/templates/quac/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..de8a977fc234860ccb9bd145e054ef8e9fbd5dc6
--- /dev/null
+++ b/promptsource/templates/quac/templates.yaml
@@ -0,0 +1,126 @@
+dataset: quac
+templates:
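+# Note: quac provides several reference answers per dialogue turn, so the
+# templates below sample one with `answers.texts[i] | choice` and rewrite the
+# dataset's CANNOTANSWER sentinel to the more natural "Cannot answer".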
+  12c9d007-991c-49ed-82e2-13a7d3147881: !Template
+    answer_choices: null
+    id: 12c9d007-991c-49ed-82e2-13a7d3147881
+    jinja: "Given the  partial dialogue : \n\nStudent: {{questions[0]}}\n\nTeacher:\
+      \ {{(answers.texts[0] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\")\
+      \ }}\n\nThe context: {{context}}\n\nAnswer the question: {{questions[1] }}\n\
+      |||\n{{(answers.texts[1] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\"\
+      )  }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: Answer Given Only First Dialogue
+    reference: ''
+  1d4014c2-7cf7-45d1-8f85-a701b6a65118: !Template
+    answer_choices: null
+    id: 1d4014c2-7cf7-45d1-8f85-a701b6a65118
+    jinja: "Given the dialogue: \n{% for i in range(0, questions | length - 1)%}\n\
+      Student: {{questions[i]}}\n\nTeacher: {{(answers.texts[i] | choice).replace(\"\
+      CANNOTANSWER\",\"Cannot answer\") }}\n{% endfor %}\n\nThe context: {{context}}\n\
+      \nAnswer the question: {{questions | last }}\n|||\n{{(answers.texts | last |\
+      \ choice).replace(\"CANNOTANSWER\",\"Cannot answer\") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Answer Given Full Dialogue
+    reference: ''
+  1f28f748-bc9e-4881-89ae-1d561abf2f2b: !Template
+    answer_choices: null
+    id: 1f28f748-bc9e-4881-89ae-1d561abf2f2b
+    jinja: 'This conversation happened between a teacher and a student:
+
+      {% for i in range(0, questions | length - 1) %}
+
+      Student: {{questions[i]}}
+
+
+      Teacher: {{(answers.texts[i] | choice).replace("CANNOTANSWER","Cannot answer")
+      }}
+
+      {% endfor %}
+
+
+      Use the article: {{context}} to answer the question: {{questions | last }}
+
+      |||
+
+      {{(answers.texts | last | choice).replace("CANNOTANSWER","Cannot answer") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Answer Conversation
+    reference: ''
+  2c052ef8-adfa-497b-adb1-9e942ad998e0: !Template
+    answer_choices: null
+    id: 2c052ef8-adfa-497b-adb1-9e942ad998e0
+    jinja: "I read an article : {{context}} \n\nThen the following conversation occurred:\
+      \ \n{% for i in range(0, questions | length - 1) %}\nStudent: {{questions[i]}}\n\
+      \nTeacher: {{(answers.texts[i] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\"\
+      ) }}\n{% endfor %}\nUse both to answer the question: {{questions | last }}\n\
+      |||\n{{(answers.texts | last | choice).replace(\"CANNOTANSWER\",\"Cannot answer\"\
+      ) }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Context First
+    reference: ''
+  70c85283-44f3-4d31-866c-02294aed7a59: !Template
+    answer_choices: null
+    id: 70c85283-44f3-4d31-866c-02294aed7a59
+    jinja: "Read the article: {{context}} \n\nThen answer the question:  {{questions\
+      \ | last}}\n\nYou can use this dialogue to find the answer faster:\n{% for i\
+      \ in range(0, questions | length - 1)%}\nStudent: {{questions[i]}}\n\nTeacher:\
+      \ {{(answers.texts[i] | choice).replace(\"CANNOTANSWER\",\"Cannot answer\")\
+      \ }}\n{% endfor %}\n|||\n{{(answers.texts | last | choice).replace(\"CANNOTANSWER\"\
+      ,\"Cannot answer\")  }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Use Dialogue as Hint
+    reference: ''
+  a62e17c6-8973-43a3-863a-40bb12a1c8cf: !Template
+    answer_choices: null
+    id: a62e17c6-8973-43a3-863a-40bb12a1c8cf
+    jinja: 'A student is asking a teacher about the following article:
+
+      {{context}}
+
+
+      This is a summary of their conversation:
+
+      {% for i in range(0, questions | length - 1)%}
+
+      Student: {{questions[i]}}
+
+
+      Teacher: {{(answers.texts[i] | choice).replace("CANNOTANSWER","Cannot answer")
+      }}
+
+      {% endfor %}
+
+
+      Use their conversation and the article to answer the question: {{questions
+      | last}}
+
+      |||
+
+      {{(answers.texts | last | choice).replace("CANNOTANSWER","Cannot answer")  }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Student Asking Teacher
+    reference: ''
diff --git a/promptsource/templates/quail/templates.yaml b/promptsource/templates/quail/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7dadcd92d437d46ff0e4c267941b17b543f4fb5
--- /dev/null
+++ b/promptsource/templates/quail/templates.yaml
@@ -0,0 +1,309 @@
+dataset: quail
+templates:
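+# Note: in the lettered variants below, each option line is rendered with
+# `'. '.join([answer_choices[k], answers[k]])`, pairing the letter labels with
+# the answer strings by index to produce lines like "A. <answer text>".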
+  01870e5a-39d0-4485-a453-893d46c82736: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 01870e5a-39d0-4485-a453-893d46c82736
+    jinja: '{{ context }}
+
+      Question: {{ question }}
+
+      Options:
+
+      {% for k in range(answers | length) %}
+
+      {{''. ''.join([answer_choices[k], answers[k]])}}
+
+      {% endfor %}
+
+      ===
+
+      The correct answer is
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_question_answer_description_id
+    reference: ''
+  1225d6c7-4d4c-46ab-9a65-a8fa87826906: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: 1225d6c7-4d4c-46ab-9a65-a8fa87826906
+    jinja: '{{ context }}
+
+      Question: {{ question }}
+
+      Options:
+
+      - {{ answer_choices | join(" \n - ") }}
+
+      ===
+
+      The correct answer is
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_question_answer_description_text
+    reference: ''
+  38caa4e6-28b9-4476-8609-b66c83679fcc: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 38caa4e6-28b9-4476-8609-b66c83679fcc
+    jinja: 'Read the following context and choose the correct option to answer the
+      question.
+
+      Context: {{ context }}
+
+      Question: {{ question }}
+
+      Options:
+
+      {% for k in range(answers | length) %}
+
+      {{''. ''.join([answer_choices[k], answers[k]])}}
+
+      {% endfor %}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_context_question_answer_id
+    reference: ''
+  7186e352-adfa-4c16-8eda-d9fcccb6293e: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: 7186e352-adfa-4c16-8eda-d9fcccb6293e
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      Pick the correct answer from the following options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_question_description_answer_text
+    reference: ''
+  773d1dad-ccc7-4f5d-936b-c43b2d3eedf7: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: 773d1dad-ccc7-4f5d-936b-c43b2d3eedf7
+    jinja: '{{ context }}
+
+      Question: {{ question }}
+
+      ===
+
+      The answer to the above question is
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: context_question_description_text
+    reference: ''
+  7b0ce9fa-6aa0-4210-ab6c-1edd4b2f43df: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: 7b0ce9fa-6aa0-4210-ab6c-1edd4b2f43df
+    jinja: '{{ context }}
+
+      According to the above context, answer the following question.
+
+      {{ question }}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: context_description_question_text
+    reference: ''
+  7c9c7cec-12c1-4005-a9a1-a027e472d949: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 7c9c7cec-12c1-4005-a9a1-a027e472d949
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      Pick the correct answer from the following options:
+
+      {% for k in range(answers | length) %}
+
+      {{''. ''.join([answer_choices[k], answers[k]])}}
+
+      {% endfor %}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_question_description_answer_id
+    reference: ''
+  80fe7668-d088-4432-98bd-9df022a62b5b: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 80fe7668-d088-4432-98bd-9df022a62b5b
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      {% for k in range(answers | length) %}
+
+      {{''. ''.join([answer_choices[k], answers[k]])}}
+
+      {% endfor %}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: no_prompt_id
+    reference: ''
+  88d0056d-e736-405f-85aa-155474fde51a: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 88d0056d-e736-405f-85aa-155474fde51a
+    jinja: '{{ context }}
+
+      According to the above context, choose the correct option to answer the following
+      question.
+
+      Question: {{ question }}
+
+      Options:
+
+      {% for k in range(answers | length) %}
+
+      {{''. ''.join([answer_choices[k], answers[k]])}}
+
+      {% endfor %}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_description_question_answer_id
+    reference: ''
+  a071e73e-5fda-45b5-8a6a-b56e477a6aee: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: a071e73e-5fda-45b5-8a6a-b56e477a6aee
+    jinja: 'Read the following context and answer the question.
+
+      Context: {{ context }}
+
+      Question: {{ question }}
+
+      Answer:
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: description_context_question_text
+    reference: ''
+  cb57451d-2a1c-4db1-a352-9f50d835b327: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: cb57451d-2a1c-4db1-a352-9f50d835b327
+    jinja: '{{ context }}
+
+      {{ question }}
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: no_prompt_text
+    reference: ''
+  ea0ba07f-bb89-42dc-b1e8-4fe6008297b2: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: ea0ba07f-bb89-42dc-b1e8-4fe6008297b2
+    jinja: '{{ context }}
+
+      According to the above context, choose the correct option to answer the following
+      question.
+
+      Question: {{ question }}
+
+      Options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: context_description_question_answer_text
+    reference: ''
+  f374c2ca-952a-47ab-8420-cb5fb2c693d9: !Template
+    answer_choices: '{{answers | join("|||")}}'
+    id: f374c2ca-952a-47ab-8420-cb5fb2c693d9
+    jinja: 'Read the following context and choose the correct option to answer the
+      question.
+
+      Context: {{ context }}
+
+      Question: {{ question }}
+
+      Options:
+
+      - {{ answer_choices | join("\n- ") }}
+
+      |||
+
+      {{ answer_choices[correct_answer_id] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: description_context_question_answer_text
+    reference: ''
diff --git a/promptsource/templates/quarel/templates.yaml b/promptsource/templates/quarel/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a5dd0e5aa899bc490ba2d7628e0704baa1ddb966
--- /dev/null
+++ b/promptsource/templates/quarel/templates.yaml
@@ -0,0 +1,93 @@
+dataset: quarel
+templates:
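+# Note: quarel names the two compared entities in `world_literals.world1` and
+# `world_literals.world2` (lists of surface forms, of which the templates take
+# the first), and `answer_index` points at the correct entity.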
+  5904fd73-b1ee-4f89-b7bc-b0fe8cc07c66: !Template
+    answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+    id: 5904fd73-b1ee-4f89-b7bc-b0fe8cc07c66
+    jinja: 'Question: {{question}}
+
+
+      Do not use {{"A"}} and {{"B"}} to answer the question but instead, choose between
+      "{{answer_choices[0]}}" and  "{{answer_choices[1]}}".
+
+      |||
+
+      {{answer_choices[answer_index]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: do_not_use
+    reference: ''
+  5b5f9d29-0ad5-4bb9-831a-11fcb115c10d: !Template
+    answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+    id: 5b5f9d29-0ad5-4bb9-831a-11fcb115c10d
+    jinja: 'Here''s a logic test: {{question}}
+
+
+      Choose the answer between "{{answer_choices[0]}}" and "{{answer_choices[1]}}".
+
+      |||
+
+      {{answer_choices[answer_index]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: logic_test
+    reference: ''
+  63c58389-605a-42b9-85a6-a2586a954a92: !Template
+    answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+    id: 63c58389-605a-42b9-85a6-a2586a954a92
+    jinja: 'Here''s a short story: {{question}}.
+
+
+      What is the most sensible answer between "{{answer_choices[0]}}" and "{{answer_choices[1]}}"?
+
+      |||
+
+      {{answer_choices[answer_index]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: heres_a_story
+    reference: ''
+  73a7adbb-41b1-4b4d-b378-d7e17d030a6f: !Template
+    answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+    id: 73a7adbb-41b1-4b4d-b378-d7e17d030a6f
+    jinja: 'Choose between "{{answer_choices[0]}}" and "{{answer_choices[1]}}".
+
+      Question: {{question}}
+
+      |||
+
+      {{answer_choices[answer_index]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: choose_between
+    reference: ''
+  92013fab-5387-44d4-bf0f-e29a31bcafb6: !Template
+    answer_choices: '{{world_literals.world1[0]}} ||| {{world_literals.world2[0]}}'
+    id: 92013fab-5387-44d4-bf0f-e29a31bcafb6
+    jinja: 'I am testing my students'' logic.
+
+      What is the answer they should choose between "{{answer_choices[0]}}" and "{{answer_choices[1]}}"?
+
+      Logic test: {{question}}
+
+      |||
+
+      {{answer_choices[answer_index]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: testing_students
+    reference: ''
diff --git a/promptsource/templates/quartz/templates.yaml b/promptsource/templates/quartz/templates.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..8dad7f8291a9d438c95092c80e9c5e9750628ca4
--- /dev/null
+++ b/promptsource/templates/quartz/templates.yaml
@@ -0,0 +1,122 @@
+dataset: quartz
+templates:
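+# Note: quartz questions are either fill-in-the-blank (containing "_____") or
+# plain questions, so every template below branches on `'_____' in question`,
+# substituting the choices joined by " or " into the blank in the first case
+# and appending them after the trimmed question otherwise.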
+  005b9776-2170-47f8-a5d2-03e83d0e55ae: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 005b9776-2170-47f8-a5d2-03e83d0e55ae
+    jinja: "Use information from the paragraph to answer the question.\n\nQuestion:\n\
+      \n{% if '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"_____\"\
+      , answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question |\
+      \ trim(\".?!\") }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif\
+      \ %}\n\nParagraph:\n\n{{ para }}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: use_info_from_question_paragraph
+    reference: ''
+  01d6ae3e-87bb-456c-9722-92a214f6ff19: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 01d6ae3e-87bb-456c-9722-92a214f6ff19
+    jinja: "{{ para }}\n{% if '_____' in question %}\n{{ question | trim(\".?!\")\
+      \ | replace(\"_____\", answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else\
+      \ %}\n{{ question | trim(\".?!\")}} {{ answer_choices | join(\" or \") }}{{\
+      \ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: paragraph_question_plain_concat
+    reference: ''
+  22e29cab-f57f-4af7-92fc-72b131a96878: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 22e29cab-f57f-4af7-92fc-72b131a96878
+    jinja: "Use information from the paragraph to answer the question.\n\nParagraph\
+      \ :\n\n{{ para }}\n\nQuestion:\n\n{% if '_____' in question %}\n{{ question\
+      \ | trim(\".?!\") | replace(\"_____\", answer_choices | join(\" or \")) }}{{\
+      \ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\") }} {{ answer_choices |\
+      \ join(\" or \") }}{{ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: use_info_from_paragraph_question
+    reference: ''
+  2f6baa0c-3b69-48be-b195-cc00cb5c96fa: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 2f6baa0c-3b69-48be-b195-cc00cb5c96fa
+    jinja: "Answer the question based on the following text.\n\nQuestion:\n\n{% if\
+      \ '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"_____\", answer_choices\
+      \ | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\")\
+      \ }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif %}\n\nText:\n\
+      \n{{ para }}|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: answer_question_based_on
+    reference: ''
+  397bdb29-03e8-478b-9840-1bfe9d57d6fb: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 397bdb29-03e8-478b-9840-1bfe9d57d6fb
+    jinja: "Answer the question below:\n\n{% if '_____' in question %}\n{{ question\
+      \ | trim(\".?!\") | replace(\"_____\", answer_choices | join(\" or \")) }}{{\
+      \ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\") }} {{  answer_choices |\
+      \ join(\" or \") }}{{ \"?\" }} \n{% endif %}\n\nAssuming that:\n\n{{ para }}|||\n\
+      {{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: answer_question_below
+    reference: ''
+  39a67851-fdf5-4f0d-bda2-4902be3a6bff: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 39a67851-fdf5-4f0d-bda2-4902be3a6bff
+    jinja: "Read the passage below and choose the right answer to the following question\
+      \ (choices are {{ answer_choices | join(\" or \") }}):\n\n{{ para }}\n\n{%\
+      \ if '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"_____\"\
+      , answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question |\
+      \ trim(\".?!\") }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif\
+      \ %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: read_passage_below_choose
+    reference: ''
+  5c794ff0-32b9-43d4-b496-1a4d246ecfc0: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: 5c794ff0-32b9-43d4-b496-1a4d246ecfc0
+    jinja: "{{ para }}\n\nHaving read the above passage, choose the right answer to\
+      \ the following question (choices are {{ answer_choices | join(\" or \") }}\
+      ):\n\n{% if '_____' in question %}\n{{ question | trim(\".?!\") | replace(\"\
+      _____\", answer_choices | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question\
+      \ | trim(\".?!\") }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif\
+      \ %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: having_read_above_passage
+    reference: ''
+  a8c6ae4c-4874-47d1-93ea-801b6e080a58: !Template
+    answer_choices: '{{choices.text | join("|||")}}'
+    id: a8c6ae4c-4874-47d1-93ea-801b6e080a58
+    jinja: "Given the fact that:\n\n{{ para }}\n\nAnswer the question:\n\n{% if '_____'\
+      \ in question %}\n{{ question | trim(\".?!\") | replace(\"_____\", answer_choices\
+      \ | join(\" or \")) }}{{ \"?\" }} \n{% else %}\n{{ question | trim(\".?!\")\
+      \ }} {{ answer_choices | join(\" or \") }}{{ \"?\" }} \n{% endif %}\n|||\n{{answer_choices[choices.label.index(answerKey)]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: given_the_fact_answer_the_q
+    reference: ''
diff --git a/promptsource/templates/quora/templates.yaml b/promptsource/templates/quora/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fbf27171d696a1e2baf389a570e0aba97549c440
--- /dev/null
+++ b/promptsource/templates/quora/templates.yaml
@@ -0,0 +1,70 @@
+dataset: quora
+templates:
+  2c780ebe-f8e6-44f0-a804-0a3e53eb8cce: !Template
+    answer_choices: no ||| yes
+    id: 2c780ebe-f8e6-44f0-a804-0a3e53eb8cce
+    jinja: Given the question "{{questions.text.0}}", would you consider "{{questions.text.1}}"
+      as a duplicate? ||| {{ answer_choices [is_duplicate] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: quora_implicit_1
+    reference: ''
+  3331355a-4d69-4060-ae9e-cdb951335ed2: !Template
+    answer_choices: no ||| yes
+    id: 3331355a-4d69-4060-ae9e-cdb951335ed2
+    jinja: Is the following question "{{questions.text.0}}" the same as "{{questions.text.1}}"?
+      ||| {{ answer_choices [is_duplicate] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: quora_basic_2
+    reference: ''
+  397b1fb9-0cf1-455b-aaf2-efdb750014c5: !Template
+    answer_choices: null
+    id: 397b1fb9-0cf1-455b-aaf2-efdb750014c5
+    jinja: '{% if is_duplicate == true %} Paraphrase the following question: {%
+      if questions.text.0|length < questions.text.1|length %}  {{questions.text.0}}
+      |||  {{questions.text.1}} {% else %}  {{questions.text.1}} ||| {{questions.text.0}}
+      {% endif %}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: quora_rephrase_1
+    reference: ''
+  6de61945-992b-4191-9b3a-930e266769c9: !Template
+    answer_choices: true ||| false
+    id: 6de61945-992b-4191-9b3a-930e266769c9
+    jinja: The question "{{questions.text.0}}" differs from "{{questions.text.1}}".
+      {{"true"}} or {{"false"}}? ||| {{ answer_choices [is_duplicate] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: quora_basic_negation_3
+    reference: ''
+  7c367d58-e34f-4899-9c09-64a6a00a04b1: !Template
+    answer_choices: false ||| true
+    id: 7c367d58-e34f-4899-9c09-64a6a00a04b1
+    jinja: The question "{{questions.text.0}}" is the same as "{{questions.text.1}}".
+      {{"true"}} or {{"false"}} ? ||| {{ answer_choices [is_duplicate] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: quora_basic_3
+    reference: ''
+  7cc5ba2c-215d-4834-b41e-3ef717f6ac8c: !Template
+    answer_choices: No, they are original questions ||| Yes, the posts are the same
+    id: 7cc5ba2c-215d-4834-b41e-3ef717f6ac8c
+    jinja: Two new posts asked on Quora are "{{questions.text.0}}" and "{{questions.text.1}}".
+      I feel like they have asked the same question. Am I correct? ||| {{answer_choices[is_duplicate]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: quora_basic_4
+    reference: ''
diff --git a/promptsource/templates/quoref/templates.yaml b/promptsource/templates/quoref/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3054b3744f19ae479b042edf9625f05565150027
--- /dev/null
+++ b/promptsource/templates/quoref/templates.yaml
@@ -0,0 +1,200 @@
+dataset: quoref
+templates:
+  4120bebc-9c8f-44af-8d1a-a65e443ce010: !Template
+    answer_choices: null
+    id: 4120bebc-9c8f-44af-8d1a-a65e443ce010
+    jinja: 'The answer to the question: {{question}} is inside the article: {{context}},
+      can you guess it?
+
+
+      |||
+
+      {{answers.text | choice}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Guess Answer
+    reference: ''
+  6f1d5031-1377-4b8a-9475-987b2275b8da: !Template
+    answer_choices: null
+    id: 6f1d5031-1377-4b8a-9475-987b2275b8da
+    jinja: 'Given the following context:
+
+
+      {{context}}
+
+
+      answer the following question:
+
+
+      {{question}} |||
+
+      {{answers.text | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Answer Question Given Context
+    reference: ''
+  9493f80a-daf5-4c30-a9fc-7bc5bc61b5e9: !Template
+    answer_choices: null
+    id: 9493f80a-daf5-4c30-a9fc-7bc5bc61b5e9
+    jinja: "The following article contains an answer for the question: {{question}}\
+      \ , can you please find it? \n\n{{context}}|||\n{{answers.text | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Find Answer
+    reference: ''
+  a3e5e25d-0a87-4cb8-89ab-3539fc4d23cb: !Template
+    answer_choices: null
+    id: a3e5e25d-0a87-4cb8-89ab-3539fc4d23cb
+    jinja: 'This article: {{context}} contains an answer for the question: {{question}},
+      what is it?
+
+      |||
+
+      {{answers.text | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Context Contains Answer
+    reference: ''
+  aa26aab2-d2e7-4560-b7eb-0cbcff7c0f31: !Template
+    answer_choices: null
+    id: aa26aab2-d2e7-4560-b7eb-0cbcff7c0f31
+    jinja: '{{question}}
+
+
+      Answer the above question based on the context below:
+
+
+      {{context}} |||
+
+      {{answers.text | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Given Context Answer Question
+    reference: ''
+  abdfa570-2de5-406c-9051-caa6a1362796: !Template
+    answer_choices: null
+    id: abdfa570-2de5-406c-9051-caa6a1362796
+    jinja: 'What is the answer for the question: {{question}} from the following
+      article?
+
+
+      {{context}}|||
+
+      {{answers.text | choice}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: What Is The Answer
+    reference: ''
+  b3ec0888-dd6f-466a-abd4-b2fbcacfdb8b: !Template
+    answer_choices: null
+    id: b3ec0888-dd6f-466a-abd4-b2fbcacfdb8b
+    jinja: 'I have a test where I am given the following article, what is an answer
+      for the question: {{question}}?
+
+
+      {{context}}|||
+
+      {{answers.text | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Answer Test
+    reference: ''
+  bf525757-8cde-4839-81fb-a85be3fd1192: !Template
+    answer_choices: null
+    id: bf525757-8cde-4839-81fb-a85be3fd1192
+    jinja: 'Given the context below:
+
+
+      {{context}}
+
+
+      Guess a valid title for it! |||
+
+      {{title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: false
+    name: Guess Title For Context
+    reference: ''
+  d055747f-7a32-4e12-aab1-fed35d42a445: !Template
+    answer_choices: null
+    id: d055747f-7a32-4e12-aab1-fed35d42a445
+    jinja: 'Found the following article online, use it to answer the question: {{question}}
+
+
+      {{context}}|||
+
+      {{answers.text | choice}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Found Context Online
+    reference: ''
+  d1abb8a0-03c4-41ef-865c-aa275278a0e4: !Template
+    answer_choices: null
+    id: d1abb8a0-03c4-41ef-865c-aa275278a0e4
+    jinja: 'A friend asked me to answer this question: {{question}}, using the article:
+      {{context}}, what would be the answer?
+
+
+      |||
+
+      {{answers.text | choice}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Answer Friend Question
+    reference: ''
+  fcbe0609-06ce-4cbd-91de-adc38966bcac: !Template
+    answer_choices: null
+    id: fcbe0609-06ce-4cbd-91de-adc38966bcac
+    jinja: 'Read the following paragraph and extract the answer for the question:
+      {{question}}
+
+
+      {{context}} |||
+
+      {{answers.text | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: 'Read And Extract'
+    reference: ''
diff --git a/promptsource/templates/race/all/templates.yaml b/promptsource/templates/race/all/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5bd0eba2476a136484cd96d277069f8ff3ab35db
--- /dev/null
+++ b/promptsource/templates/race/all/templates.yaml
@@ -0,0 +1,186 @@
+dataset: race
+subset: all
+templates:
+  00ede994-778f-4d25-82dc-bae7ba9e115f: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 00ede994-778f-4d25-82dc-bae7ba9e115f
+    jinja: 'I''m taking a test and have to guess the right answer to the question
+      after the article.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Options: {{"A"}}: {{options.0}}
+
+      {{"B"}}: {{options.1}}
+
+      {{"C"}}: {{options.2}}
+
+      {{"D"}}: {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Taking a test
+    reference: ''
+  02ed2a0c-b3a4-4b86-8524-e8961f042ae9: !Template
+    answer_choices: null
+    id: 02ed2a0c-b3a4-4b86-8524-e8961f042ae9
+    jinja: "Write a multi-choice question for the following article:\nArticle: {{article}}\n\
+      |||\nQuestion: \n{{question}}\nOptions:\n{{\"A\"}} {{options.0}}\n{{\"B\"}}\
+      \ {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\nAnswer:\n\
+      {{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Write a multi-choice question for the following article
+    reference: ''
+  59b5c4e3-9539-449f-ac60-04e681c705b5: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 59b5c4e3-9539-449f-ac60-04e681c705b5
+    jinja: 'Read the following article and answer the question.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Answer:
+
+      |||
+
+      {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Read the article and answer the question (no option)
+    reference: ''
+  81368f4b-817f-4c81-9db5-b86905bb975e: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 81368f4b-817f-4c81-9db5-b86905bb975e
+    jinja: 'Read the following article and select the best answer.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      - {{answer_choices | join("\n- ")}}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer (generate span)
+    reference: ''
+  b808b05e-0d2a-459e-b345-2d83cdb20216: !Template
+    answer_choices: Yes ||| No
+    id: b808b05e-0d2a-459e-b345-2d83cdb20216
+    jinja: '{% set candidate = ["A", "B", "C", "D"] | choice %}
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Yes or no, is the answer "{{ [options.0,options.1,options.2,options.3][{"A":0,"B":1,"C":2,"D":3}[candidate]]
+      }}"?
+
+      |||
+
+      {% if candidate == answer %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Is this the right answer
+    reference: ''
+  cce2b02b-6c47-4941-83d6-5ecb2dfedadc: !Template
+    answer_choices: null
+    id: cce2b02b-6c47-4941-83d6-5ecb2dfedadc
+    jinja: "Write a multi-choice question for the following article, with the given\
+      \ choices and answer:\nArticle: {{article}}\nOptions:\n{{\"A\"}} {{options.0}}\n\
+      {{\"B\"}} {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\n\
+      Answer:\n{{answer}} {{ [options.0,options.1,options.2,options.3][{\"A\":0,\"\
+      B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Write a multi-choice question (options given)
+    reference: ''
+  e5c4d6a3-ff68-4243-93aa-2629e72d0d70: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: e5c4d6a3-ff68-4243-93aa-2629e72d0d70
+    jinja: 'Read the article and select the best answer.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Options: {{"A"}}: {{options.0}}
+
+      {{"B"}}: {{options.1}}
+
+      {{"C"}}: {{options.2}}
+
+      {{"D"}}: {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer
+    reference: ''
+  f79ba457-3c44-455f-a6ed-9c5f50d0e886: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: f79ba457-3c44-455f-a6ed-9c5f50d0e886
+    jinja: '{{article}}
+
+      {{question}}
+
+      {{"A)"}} {{options.0}}
+
+      {{"B)"}} {{options.1}}
+
+      {{"C)"}} {{options.2}}
+
+      {{"D)"}} {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer (no instructions)
+    reference: ''
diff --git a/promptsource/templates/race/high/templates.yaml b/promptsource/templates/race/high/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0157b50b9c8f4162e9e0439ecb3ae65fc8704ea
--- /dev/null
+++ b/promptsource/templates/race/high/templates.yaml
@@ -0,0 +1,186 @@
+dataset: race
+subset: high
+templates:
+  26fdd0e9-9066-478f-8b5b-03fc0477bf7a: !Template
+    answer_choices: Yes ||| No
+    id: 26fdd0e9-9066-478f-8b5b-03fc0477bf7a
+    jinja: '{% set candidate = ["A", "B", "C", "D"] | choice %}
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Yes or no, is the answer "{{ [options.0,options.1,options.2,options.3][{"A":0,"B":1,"C":2,"D":3}[candidate]]
+      }}"?
+
+      |||
+
+      {% if candidate == answer %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Is this the right answer
+    reference: ''
+  46b64805-d02c-4aa3-a7c5-34503f3ad56d: !Template
+    answer_choices: null
+    id: 46b64805-d02c-4aa3-a7c5-34503f3ad56d
+    jinja: "Write a multi-choice question for the following article:\nArticle: {{article}}\n\
+      |||\nQuestion: \n{{question}}\nOptions:\n{{\"A\"}} {{options.0}}\n{{\"B\"}}\
+      \ {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\nAnswer:\n\
+      {{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Write a multi-choice question for the following article
+    reference: ''
+  4ef4f2ee-6151-41b0-bdb7-e093cde8c42a: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 4ef4f2ee-6151-41b0-bdb7-e093cde8c42a
+    jinja: 'I''m taking a test and have to guess the right answer to the question
+      after the article.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Options: {{"A"}}: {{options.0}}
+
+      {{"B"}}: {{options.1}}
+
+      {{"C"}}: {{options.2}}
+
+      {{"D"}}: {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Taking a test
+    reference: ''
+  5ed5f2e9-6cf3-4fc5-b9eb-246d9a4ee511: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 5ed5f2e9-6cf3-4fc5-b9eb-246d9a4ee511
+    jinja: 'Read the article and select the best answer.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Options: {{"A"}}: {{options.0}}
+
+      {{"B"}}: {{options.1}}
+
+      {{"C"}}: {{options.2}}
+
+      {{"D"}}: {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer
+    reference: ''
+  691f84af-ca02-458d-8912-f661aefccd52: !Template
+    answer_choices: null
+    id: 691f84af-ca02-458d-8912-f661aefccd52
+    jinja: "Write a multi-choice question for the following article, with the given\
+      \ choices and answer:\nArticle: {{article}}\nOptions:\n{{\"A\"}} {{options.0}}\n\
+      {{\"B\"}} {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\n\
+      Answer:\n{{answer}} {{ [options.0,options.1,options.2,options.3][{\"A\":0,\"\
+      B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Write a multi-choice question (options given)
+    reference: ''
+  ab253338-5b02-46e8-9959-b66d1009c34a: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: ab253338-5b02-46e8-9959-b66d1009c34a
+    jinja: 'Read the following article and select the best answer.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      - {{answer_choices | join("\n- ")}}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer (generate span)
+    reference: ''
+  c8c9dcfd-69d3-4ccd-8aeb-2bdb98aba261: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: c8c9dcfd-69d3-4ccd-8aeb-2bdb98aba261
+    jinja: '{{article}}
+
+      {{question}}
+
+      {{"A)"}} {{options.0}}
+
+      {{"B)"}} {{options.1}}
+
+      {{"C)"}} {{options.2}}
+
+      {{"D)"}} {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer (no instructions)
+    reference: ''
+  e1b9d073-e18e-4940-9868-5b4a35617c35: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: e1b9d073-e18e-4940-9868-5b4a35617c35
+    jinja: 'Read the following article and answer the question.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Answer:
+
+      |||
+
+      {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Read the article and answer the question (no option)
+    reference: ''
diff --git a/promptsource/templates/race/middle/templates.yaml b/promptsource/templates/race/middle/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2e34756b4465a31679820a4bcbb1746b0430f4d9
--- /dev/null
+++ b/promptsource/templates/race/middle/templates.yaml
@@ -0,0 +1,186 @@
+dataset: race
+subset: middle
+templates:
+  0a47d28c-7cf5-405d-b9ef-9b82c1a20002: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 0a47d28c-7cf5-405d-b9ef-9b82c1a20002
+    jinja: 'Read the article and select the best answer.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Options: {{"A"}}: {{options.0}}
+
+      {{"B"}}: {{options.1}}
+
+      {{"C"}}: {{options.2}}
+
+      {{"D"}}: {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer
+    reference: ''
+  1a68b62e-404c-4037-baec-7e20cb4c3f6b: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 1a68b62e-404c-4037-baec-7e20cb4c3f6b
+    jinja: 'Read the following article and answer the question.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Answer:
+
+      |||
+
+      {{ answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Read the article and answer the question (no option)
+    reference: ''
+  2e7f5fff-518e-4100-90f9-cca094b11e95: !Template
+    answer_choices: Yes ||| No
+    id: 2e7f5fff-518e-4100-90f9-cca094b11e95
+    jinja: '{% set candidate = ["A", "B", "C", "D"] | choice %}
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Yes or no, is the answer "{{ [options.0,options.1,options.2,options.3][{"A":0,"B":1,"C":2,"D":3}[candidate]]
+      }}"?
+
+      |||
+
+      {% if candidate == answer %}
+
+      Yes
+
+      {% else %}
+
+      No
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Is this the right answer
+    reference: ''
+  6f2e7b0d-9691-4e28-9666-6c4d478a1641: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: 6f2e7b0d-9691-4e28-9666-6c4d478a1641
+    jinja: '{{article}}
+
+      {{question}}
+
+      {{"A)"}} {{options.0}}
+
+      {{"B)"}} {{options.1}}
+
+      {{"C)"}} {{options.2}}
+
+      {{"D)"}} {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer (no instructions)
+    reference: ''
+  9aacc46d-8863-4e02-9783-9ec931425759: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 9aacc46d-8863-4e02-9783-9ec931425759
+    jinja: 'Read the following article and select the best answer.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      - {{answer_choices | join("\n- ")}}
+
+      |||
+
+      {{answer_choices[{"A":0,"B":1,"C":2,"D":3}[answer]]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Select the best answer (generate span)
+    reference: ''
+  9aedaa07-b815-4a35-890b-6100f00706aa: !Template
+    answer_choices: null
+    id: 9aedaa07-b815-4a35-890b-6100f00706aa
+    jinja: "Write a multi-choice question for the following article, with the given\
+      \ choices and answer:\nArticle: {{article}}\nOptions:\n{{\"A\"}} {{options.0}}\n\
+      {{\"B\"}} {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\n\
+      Answer:\n{{answer}} {{ [options.0,options.1,options.2,options.3][{\"A\":0,\"\
+      B\":1,\"C\":2,\"D\":3}[answer]] }}\nQuestion: \n|||\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Write a multi-choice question (options given)
+    reference: ''
+  af4869c4-35af-4644-86d9-27843ca4efd5: !Template
+    answer_choices: null
+    id: af4869c4-35af-4644-86d9-27843ca4efd5
+    jinja: "Write a multi-choice question for the following article:\nArticle: {{article}}\n\
+      |||\nQuestion: \n{{question}}\nOptions:\n{{\"A\"}} {{options.0}}\n{{\"B\"}}\
+      \ {{options.1}}\n{{\"C\"}} {{options.2}}\n{{\"D\"}} {{options.3}}\nAnswer:\n\
+      {{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Write a multi-choice question for the following article
+    reference: ''
+  ebe34816-2a1f-42b3-a9ac-ce4d36633fdb: !Template
+    answer_choices: A ||| B ||| C ||| D
+    id: ebe34816-2a1f-42b3-a9ac-ce4d36633fdb
+    jinja: 'I''m taking a test and have to guess the right answer to the question
+      after the article.
+
+      Article: {{article}}
+
+      Question: {{question}}
+
+      Options: {{"A"}}: {{options.0}}
+
+      {{"B"}}: {{options.1}}
+
+      {{"C"}}: {{options.2}}
+
+      {{"D"}}: {{options.3}}
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Taking a test
+    reference: ''
diff --git a/promptsource/templates/ropes/templates.yaml b/promptsource/templates/ropes/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..22e42c4379f80271b706aa2d05e2ff7a4a5174b2
--- /dev/null
+++ b/promptsource/templates/ropes/templates.yaml
@@ -0,0 +1,256 @@
+dataset: ropes
+templates:
+  0791ec30-6361-4e62-8dce-ca9cbf997acc: !Template
+    answer_choices: null
+    id: 0791ec30-6361-4e62-8dce-ca9cbf997acc
+    jinja: "{% if answers.text %}\nPlease answer correctly the following question\
+      \ related to the paragraph below. \n\n{{ question }}\n\n{{ situation }}\n\n\
+      Hint: {{ background }}\n|||\n{{ answers.text | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: prompt_beginning
+    reference: ''
+  0909d72d-50c7-4cbb-bec4-1f891123717c: !Template
+    answer_choices: null
+    id: 0909d72d-50c7-4cbb-bec4-1f891123717c
+    jinja: "{% if answers.text %}\n{{ situation }}\n\nGiven the paragraph above, please\
+      \ correctly answer the following question: \n\n{{ question }}\n|||\n{{ answers.text\
+      \ | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: prompt_bottom_no_hint
+    reference: ''
+  1e4944e7-4d5b-475c-8b04-4b523e96bc51: !Template
+    answer_choices: null
+    id: 1e4944e7-4d5b-475c-8b04-4b523e96bc51
+    jinja: '{% if answers.text %}
+
+      Background: {{ background }}
+
+
+      Paragraph: {{ situation }}
+
+
+      Given the paragraph above, please correctly answer the following question: {{
+      question }}
+
+      |||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: prompt_bottom_hint_beginning
+    reference: ''
+  27fb16c6-a563-46ef-af73-42e15183824e: !Template
+    answer_choices: null
+    id: 27fb16c6-a563-46ef-af73-42e15183824e
+    jinja: '{% if answers.text %}
+
+      Given the background: {{background}}
+
+
+      and the situation: {{situation}}
+
+
+      Answer the following question: {{question}}|||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: given_background_situation
+    reference: ''
+  31faf808-80ff-47af-ac49-d2cd7a7abcaf: !Template
+    answer_choices: null
+    id: 31faf808-80ff-47af-ac49-d2cd7a7abcaf
+    jinja: '{% if answers.text %}
+
+      {{ situation }}
+
+
+      {{ question }}
+
+
+      |||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: plain_no_background
+    reference: Task without background
+  473f2c9c-9731-443c-a641-5e43770f7df6: !Template
+    answer_choices: null
+    id: 473f2c9c-9731-443c-a641-5e43770f7df6
+    jinja: '{% if answers.text %}
+
+      {{ situation }}
+
+
+      {{ question }}
+
+
+      Hint: {{ background }}
+
+      |||
+
+      {{ answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: plain_bottom_hint
+    reference: ''
+  a04f69ac-8122-4618-8426-185fc043feca: !Template
+    answer_choices: null
+    id: a04f69ac-8122-4618-8426-185fc043feca
+    jinja: '{% if answers.text %}
+
+      {{ background }}
+
+
+      {{ situation }}
+
+
+      {{ question }}
+
+      |||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: plain_background_situation
+    reference: ''
+  a17aefbb-c571-4127-8170-379e2ec83774: !Template
+    answer_choices: null
+    id: a17aefbb-c571-4127-8170-379e2ec83774
+    jinja: '{% if answers.text %}
+
+      I can use this background: {{background}}
+
+
+      Now, I have a new situation: {{situation}}
+
+
+      Answer this question please: {{question}}|||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: background_new_situation_answer
+    reference: ''
+  b6da4f12-5384-46f5-a74e-c703c19d1698: !Template
+    answer_choices: null
+    id: b6da4f12-5384-46f5-a74e-c703c19d1698
+    jinja: '{% if answers.text %}
+
+      You are given a new situation: {{situation}}
+
+
+      and a hint: {{background}}
+
+
+      Please answer this question: {{question}}|||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: background_situation_middle
+    reference: ''
+  cc747655-6472-4023-95e4-03cb85d5a1c5: !Template
+    answer_choices: null
+    id: cc747655-6472-4023-95e4-03cb85d5a1c5
+    jinja: '{% if answers.text %}
+
+      I have a new situation: {{situation}}
+
+
+      But I can use this background: {{background}}
+
+
+      What is an answer for this question: {{question}}|||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: new_situation_background_answer
+    reference: ''
+  cc8f3c6b-b800-4b47-b6ec-e8febfdaad6f: !Template
+    answer_choices: null
+    id: cc8f3c6b-b800-4b47-b6ec-e8febfdaad6f
+    jinja: "{% if answers.text %}\n{{ situation }}\n\nGiven the paragraph above, please\
+      \ correctly answer the following question: \n\n{{ question }}\n\nHint: {{ background\
+      \ }}\n|||\n{{ answers.text | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: prompt_mix
+    reference: ''
+  f62e0adb-ca74-4280-8ed3-8b53411d87ce: !Template
+    answer_choices: null
+    id: f62e0adb-ca74-4280-8ed3-8b53411d87ce
+    jinja: '{% if answers.text %}
+
+      I read this background article the other day: {{background}}
+
+
+      I am facing a new situation today: {{situation}}
+
+
+      Using the knowledge I acquired from the background article, how should I correctly
+      answer the following question regarding my new situation: {{question}}|||
+
+      {{ answers.text | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: read_background_situation
+    reference: ''
diff --git a/promptsource/templates/rotten_tomatoes/templates.yaml b/promptsource/templates/rotten_tomatoes/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7197e1c7b7c728fdee55d07a9146ea398ac8f690
--- /dev/null
+++ b/promptsource/templates/rotten_tomatoes/templates.yaml
@@ -0,0 +1,122 @@
+dataset: rotten_tomatoes
+templates:
+  10adbcf1-b839-4522-bd76-567f0c760474: !Template
+    answer_choices: bad ||| good
+    id: 10adbcf1-b839-4522-bd76-567f0c760474
+    jinja: '{{text}} Did the reviewer find this movie {{"good or bad"}}? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Opinion bad good choices
+    reference: ''
+  162f7f89-4a93-42e9-9525-ba12e243ee48: !Template
+    answer_choices: negative ||| positive
+    id: 162f7f89-4a93-42e9-9525-ba12e243ee48
+    jinja: '{{text}} What is the sentiment expressed in this text? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Text Expressed Sentiment
+    reference: ''
+  37ac89b8-09f8-443d-982c-980a86f26ea0: !Template
+    answer_choices: negative ||| positive
+    id: 37ac89b8-09f8-443d-982c-980a86f26ea0
+    jinja: "{{text}} \nIs this review {{\"positive or negative\"}}? ||| \n{{answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: 'Sentiment with choices'
+    reference: ''
+  59e2aa7c-696f-4b85-87e9-688ea802d968: !Template
+    answer_choices: No ||| Yes
+    id: 59e2aa7c-696f-4b85-87e9-688ea802d968
+    jinja: '{{text}} Did the reviewer enjoy the movie? ||| {{ answer_choices [label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Enjoyment Yes No
+    reference: ''
+  7a8ccb1c-6737-4863-b08a-61d4a2839204: !Template
+    answer_choices: They didn't like it ||| They loved it
+    id: 7a8ccb1c-6737-4863-b08a-61d4a2839204
+    jinja: '{{text}} How does the reviewer feel about the movie? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Enjoyment
+    reference: ''
+  94e190d5-2196-486e-908b-759f288eac6e: !Template
+    answer_choices: negative ||| positive
+    id: 94e190d5-2196-486e-908b-759f288eac6e
+    jinja: '{{text}} The sentiment expressed for the movie is ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Movie Expressed Sentiment
+    reference: ''
+  a8f6927e-7eca-4975-a93c-f520f8be480d: !Template
+    answer_choices: negative ||| positive
+    id: a8f6927e-7eca-4975-a93c-f520f8be480d
+    jinja: '{{text}} What sentiment does the writer express for the movie? ||| {{
+      answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Writer Expressed Sentiment
+    reference: ''
+  b60cad41-6bca-422a-aef7-cb113fcc32b0: !Template
+    answer_choices: negative ||| positive
+    id: b60cad41-6bca-422a-aef7-cb113fcc32b0
+    jinja: The following movie review expresses what sentiment? {{text}} ||| {{ answer_choices
+      [label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Movie Expressed Sentiment 2
+    reference: ''
+  c75e322d-d6b4-4a28-b5a0-27fddfee694d: !Template
+    answer_choices: negative ||| positive
+    id: c75e322d-d6b4-4a28-b5a0-27fddfee694d
+    jinja: '{{text}} What is the sentiment expressed by the reviewer for the movie?
+      ||| {{ answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Expressed Sentiment
+    reference: ''
+  e05ec7b9-5a8d-4670-9723-0237c1bb1eca: !Template
+    answer_choices: negative ||| positive
+    id: e05ec7b9-5a8d-4670-9723-0237c1bb1eca
+    jinja: '{{text}} How does the viewer feel about the movie? ||| {{ answer_choices
+      [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Reviewer Sentiment Feeling
+    reference: ''
diff --git a/promptsource/templates/samsum/templates.yaml b/promptsource/templates/samsum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7d92f163b28bc49ee0402be9ad9410468970c6f7
--- /dev/null
+++ b/promptsource/templates/samsum/templates.yaml
@@ -0,0 +1,97 @@
+dataset: samsum
+templates:
+  01faf0cd-d9d8-4245-b86f-e7e13c2972ff: !Template
+    answer_choices: null
+    id: 01faf0cd-d9d8-4245-b86f-e7e13c2972ff
+    jinja: 'Summarize this dialogue: {{dialogue}} |||
+
+      {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: true
+    name: 'Summarize this dialogue:'
+    reference: ''
+  182a251f-2f76-4b36-8d2e-417f8d43f729: !Template
+    answer_choices: null
+    id: 182a251f-2f76-4b36-8d2e-417f8d43f729
+    jinja: '{{dialogue}}
+
+      Given the above dialogue, write a summary. |||
+
+      {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: true
+    name: Given the above dialogue write a summary
+    reference: ''
+  72eda731-894d-4260-9113-9e492822f80e: !Template
+    answer_choices: null
+    id: 72eda731-894d-4260-9113-9e492822f80e
+    jinja: 'Summarize: {{dialogue}}|||
+
+      {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: true
+    name: 'Summarize:'
+    reference: ''
+  7bd51f5b-5bac-429e-b8f9-dd6782b92a59: !Template
+    answer_choices: null
+    id: 7bd51f5b-5bac-429e-b8f9-dd6782b92a59
+    jinja: '{{dialogue}}
+
+      To sum up this dialogue:
+
+      |||{{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: true
+    name: To sum up this dialogue
+    reference: ''
+  8d829dcb-ea64-457d-b025-f16e31c2834a: !Template
+    answer_choices: null
+    id: 8d829dcb-ea64-457d-b025-f16e31c2834a
+    jinja: 'Generate a summary for this dialogue:
+
+      {{dialogue}}
+
+      |||{{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: true
+    name: Generate a summary for this dialogue
+    reference: ''
+  9f571a72-6813-4307-9aae-753ca0f737c5: !Template
+    answer_choices: null
+    id: 9f571a72-6813-4307-9aae-753ca0f737c5
+    jinja: 'Write a dialogue that matches this summary: {{summary}} |||
+
+      {{dialogue}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: false
+    name: Write a dialogue that matches this summary
+    reference: ''
+  bd891653-49b6-40bb-968f-8e6632c75659: !Template
+    answer_choices: null
+    id: bd891653-49b6-40bb-968f-8e6632c75659
+    jinja: "Sum up the following dialogue: \n{{dialogue}}\n|||{{summary}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      original_task: true
+    name: Sum up the following dialogue
+    reference: ''
diff --git a/promptsource/templates/scan/addprim_jump/templates.yaml b/promptsource/templates/scan/addprim_jump/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c0f4c7626cce7b2dde532bb353539f9be838a75f
--- /dev/null
+++ b/promptsource/templates/scan/addprim_jump/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: addprim_jump
+templates:
+  013efd14-7197-46fc-8937-1fbd7a824161: !Template
+    answer_choices: null
+    id: 013efd14-7197-46fc-8937-1fbd7a824161
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  24bb4d4d-0fc5-4859-89f8-eb2113115482: !Template
+    answer_choices: null
+    id: 24bb4d4d-0fc5-4859-89f8-eb2113115482
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  3f01f525-3920-4e31-bce6-430f31f80942: !Template
+    answer_choices: null
+    id: 3f01f525-3920-4e31-bce6-430f31f80942
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  4653d6f3-90ae-402e-a658-2d12c2eae490: !Template
+    answer_choices: null
+    id: 4653d6f3-90ae-402e-a658-2d12c2eae490
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command to the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  60543324-7eaf-404d-b144-e36e19d4ddd7: !Template
+    answer_choices: null
+    id: 60543324-7eaf-404d-b144-e36e19d4ddd7
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  6e9859a4-f600-44e4-ab8d-c2cdd06f482d: !Template
+    answer_choices: null
+    id: 6e9859a4-f600-44e4-ab8d-c2cdd06f482d
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  7b987d1a-874a-4de9-bf37-e2c1998f4a6d: !Template
+    answer_choices: null
+    id: 7b987d1a-874a-4de9-bf37-e2c1998f4a6d
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  84553f4f-85a5-4955-b425-d501c8973ae9: !Template
+    answer_choices: null
+    id: 84553f4f-85a5-4955-b425-d501c8973ae9
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  9a8cb50e-fded-46e5-ab17-ced55972f362: !Template
+    answer_choices: null
+    id: 9a8cb50e-fded-46e5-ab17-ced55972f362
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  9ffe829c-4879-45df-85c4-891589d9a648: !Template
+    answer_choices: null
+    id: 9ffe829c-4879-45df-85c4-891589d9a648
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  ab9c675b-4949-4469-860f-ec7ae0abbc01: !Template
+    answer_choices: null
+    id: ab9c675b-4949-4469-860f-ec7ae0abbc01
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  f3f0e58e-cc50-4d9b-aec7-408eb5f9e422: !Template
+    answer_choices: null
+    id: f3f0e58e-cc50-4d9b-aec7-408eb5f9e422
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
diff --git a/promptsource/templates/scan/addprim_turn_left/templates.yaml b/promptsource/templates/scan/addprim_turn_left/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df0cdec840a0b7c2779c39ad031dafd3e04ecaa7
--- /dev/null
+++ b/promptsource/templates/scan/addprim_turn_left/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: addprim_turn_left
+templates:
+  084fa8b9-8ca3-42bf-8bd5-ab58d3988aa7: !Template
+    answer_choices: null
+    id: 084fa8b9-8ca3-42bf-8bd5-ab58d3988aa7
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  1ac8d7f5-b407-4288-9c8d-cc301f3d174d: !Template
+    answer_choices: null
+    id: 1ac8d7f5-b407-4288-9c8d-cc301f3d174d
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  57888404-65cb-41a7-ba0d-cc4bb3f86a05: !Template
+    answer_choices: null
+    id: 57888404-65cb-41a7-ba0d-cc4bb3f86a05
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  9e5544de-374a-42e0-b30f-c62d8e1c8210: !Template
+    answer_choices: null
+    id: 9e5544de-374a-42e0-b30f-c62d8e1c8210
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  b1cb45aa-be3b-4280-9edd-c55e02ace580: !Template
+    answer_choices: null
+    id: b1cb45aa-be3b-4280-9edd-c55e02ace580
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  b6b704e9-a3df-4146-a3d9-dd870fe19906: !Template
+    answer_choices: null
+    id: b6b704e9-a3df-4146-a3d9-dd870fe19906
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  beb5693d-1153-47d1-ac71-17f2b34c7fe6: !Template
+    answer_choices: null
+    id: beb5693d-1153-47d1-ac71-17f2b34c7fe6
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  e76ed0f4-f4e5-4b24-8710-01b43cc74f0e: !Template
+    answer_choices: null
+    id: e76ed0f4-f4e5-4b24-8710-01b43cc74f0e
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  e883faa5-0d15-4827-b996-77b198360515: !Template
+    answer_choices: null
+    id: e883faa5-0d15-4827-b996-77b198360515
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  f6fa6715-ce35-4e76-9f93-1cb97d8009ac: !Template
+    answer_choices: null
+    id: f6fa6715-ce35-4e76-9f93-1cb97d8009ac
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command to the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  fa08d415-9a16-4260-91e2-8d5d464167d8: !Template
+    answer_choices: null
+    id: fa08d415-9a16-4260-91e2-8d5d464167d8
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  fe309524-4bcd-442a-a489-3270812a63c8: !Template
+    answer_choices: null
+    id: fe309524-4bcd-442a-a489-3270812a63c8
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
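Each `jinja` field above is a standard Jinja2 template; by promptsource convention the `|||` marker splits the rendered string into the prompt (before it) and the target (after it). Below is a minimal rendering sketch for the `affirmative_bottom` template, using plain jinja2 rather than the promptsource `Template` class; the `commands`/`actions` values are hypothetical stand-ins for a real `scan` example:

```python
# Illustration only, not part of this diff. Assumes the "|||" convention
# used throughout these files: text before it is the prompt, after it the target.
from jinja2 import Template

JINJA_SOURCE = (
    "{{ commands }}\n\n"
    "Given the commands above, produce the corresponding correct sequence of actions.\n"
    "|||\n"
    "{{ actions }}"
)

# Hypothetical SCAN example; real values come from the dataset's
# `commands` and `actions` fields.
example = {"commands": "jump twice", "actions": "I_JUMP I_JUMP"}

rendered = Template(JINJA_SOURCE).render(**example)
prompt, target = (part.strip() for part in rendered.split("|||"))

print(prompt)  # "jump twice\n\nGiven the commands above, produce ..."
print(target)  # "I_JUMP I_JUMP"
```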
diff --git a/promptsource/templates/scan/filler_num0/templates.yaml b/promptsource/templates/scan/filler_num0/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..365e09df8abb3a23878a3f547c95eec675fcd311
--- /dev/null
+++ b/promptsource/templates/scan/filler_num0/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: filler_num0
+templates:
+  118ba505-cc28-4ed5-976a-00c4b2576c8f: !Template
+    answer_choices: null
+    id: 118ba505-cc28-4ed5-976a-00c4b2576c8f
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  3456f354-88c2-4e04-9ac4-4c05a9d7e280: !Template
+    answer_choices: null
+    id: 3456f354-88c2-4e04-9ac4-4c05a9d7e280
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  357d5815-14cf-4d8f-bb68-39163d699da1: !Template
+    answer_choices: null
+    id: 357d5815-14cf-4d8f-bb68-39163d699da1
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  498daded-7731-4ff8-9eea-686d61ab3eee: !Template
+    answer_choices: null
+    id: 498daded-7731-4ff8-9eea-686d61ab3eee
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  581d8984-4960-4005-b0e1-a7a9dcc3021e: !Template
+    answer_choices: null
+    id: 581d8984-4960-4005-b0e1-a7a9dcc3021e
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  6527f043-0af1-437a-ba1d-5ad7f9fec11c: !Template
+    answer_choices: null
+    id: 6527f043-0af1-437a-ba1d-5ad7f9fec11c
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  70c672a4-549b-499d-bdf1-eaca3959ac32: !Template
+    answer_choices: null
+    id: 70c672a4-549b-499d-bdf1-eaca3959ac32
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  7f403c5d-de28-4449-b1ca-aa3dfd7a06bd: !Template
+    answer_choices: null
+    id: 7f403c5d-de28-4449-b1ca-aa3dfd7a06bd
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  94989ff1-ea2d-4ce1-aa82-c5deb5250c50: !Template
+    answer_choices: null
+    id: 94989ff1-ea2d-4ce1-aa82-c5deb5250c50
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  d3625591-8e47-4670-a0b4-2f61acbc9e71: !Template
+    answer_choices: null
+    id: d3625591-8e47-4670-a0b4-2f61acbc9e71
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  d5f3f82d-0360-4270-abd6-2df32775e797: !Template
+    answer_choices: null
+    id: d5f3f82d-0360-4270-abd6-2df32775e797
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  deadab5f-3e15-45ec-adb7-84ee6446d380: !Template
+    answer_choices: null
+    id: deadab5f-3e15-45ec-adb7-84ee6446d380
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
diff --git a/promptsource/templates/scan/filler_num1/templates.yaml b/promptsource/templates/scan/filler_num1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff5c025ced893221c3daebc7c9ad1fa9a5b2147e
--- /dev/null
+++ b/promptsource/templates/scan/filler_num1/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: filler_num1
+templates:
+  21cbc219-c6ec-46d8-8cfc-e039e0429746: !Template
+    answer_choices: null
+    id: 21cbc219-c6ec-46d8-8cfc-e039e0429746
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  23b9f23e-8c4d-4275-85f2-9cea58bb4e23: !Template
+    answer_choices: null
+    id: 23b9f23e-8c4d-4275-85f2-9cea58bb4e23
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  316ad8b2-edca-4ca8-94d3-941fa4a46757: !Template
+    answer_choices: null
+    id: 316ad8b2-edca-4ca8-94d3-941fa4a46757
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  46d86128-23a7-4c5a-9ac3-9c6b11b4f168: !Template
+    answer_choices: null
+    id: 46d86128-23a7-4c5a-9ac3-9c6b11b4f168
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  5a421d94-4059-4edf-a4ea-7dc862948976: !Template
+    answer_choices: null
+    id: 5a421d94-4059-4edf-a4ea-7dc862948976
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  775cb4bf-ddbc-4cd7-b7b8-cdf095970347: !Template
+    answer_choices: null
+    id: 775cb4bf-ddbc-4cd7-b7b8-cdf095970347
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  7b1a7d93-03cc-47f1-a17f-5ca973d546ea: !Template
+    answer_choices: null
+    id: 7b1a7d93-03cc-47f1-a17f-5ca973d546ea
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  80a30953-99c4-44aa-b5bf-da4b35c81269: !Template
+    answer_choices: null
+    id: 80a30953-99c4-44aa-b5bf-da4b35c81269
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  83bcb225-85e0-4a18-becc-ee74e095c67a: !Template
+    answer_choices: null
+    id: 83bcb225-85e0-4a18-becc-ee74e095c67a
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  9d0c5da0-4d60-4e66-b273-091c39dcc2b7: !Template
+    answer_choices: null
+    id: 9d0c5da0-4d60-4e66-b273-091c39dcc2b7
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  a61db237-78a5-46f2-a791-43728b5e4be8: !Template
+    answer_choices: null
+    id: a61db237-78a5-46f2-a791-43728b5e4be8
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  d7037f77-f0b5-4c40-80ed-20dd637826f0: !Template
+    answer_choices: null
+    id: d7037f77-f0b5-4c40-80ed-20dd637826f0
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
diff --git a/promptsource/templates/scan/filler_num2/templates.yaml b/promptsource/templates/scan/filler_num2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..81cb0c2e4695bb125b4caea6f8e3801b59a2a481
--- /dev/null
+++ b/promptsource/templates/scan/filler_num2/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: filler_num2
+templates:
+  05ae758e-801a-4295-8f25-605114379a55: !Template
+    answer_choices: null
+    id: 05ae758e-801a-4295-8f25-605114379a55
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  19dfabe3-3dab-424a-b102-594e840dd93d: !Template
+    answer_choices: null
+    id: 19dfabe3-3dab-424a-b102-594e840dd93d
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  27e17a7d-54c6-40b2-961c-f0a8409e94ee: !Template
+    answer_choices: null
+    id: 27e17a7d-54c6-40b2-961c-f0a8409e94ee
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  2840eb59-129f-41aa-be04-67d0b93126a2: !Template
+    answer_choices: null
+    id: 2840eb59-129f-41aa-be04-67d0b93126a2
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  5a1fbf2d-73c7-4310-8c1d-8cee38509a12: !Template
+    answer_choices: null
+    id: 5a1fbf2d-73c7-4310-8c1d-8cee38509a12
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  6fe70c4d-379e-4d29-9dbb-117d1ca0d9f4: !Template
+    answer_choices: null
+    id: 6fe70c4d-379e-4d29-9dbb-117d1ca0d9f4
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  751e07c6-2a37-4fed-8ee0-a7f501acaeda: !Template
+    answer_choices: null
+    id: 751e07c6-2a37-4fed-8ee0-a7f501acaeda
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  9300d348-fbc8-4e7a-8a9c-9ff5dda44448: !Template
+    answer_choices: null
+    id: 9300d348-fbc8-4e7a-8a9c-9ff5dda44448
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  a4cdaf62-bb3d-4f0a-81d8-a5c02519ed47: !Template
+    answer_choices: null
+    id: a4cdaf62-bb3d-4f0a-81d8-a5c02519ed47
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  c1fe04f7-be41-4ac2-a936-193187271067: !Template
+    answer_choices: null
+    id: c1fe04f7-be41-4ac2-a936-193187271067
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  db701a1b-af5b-491c-b5d6-cd4419444639: !Template
+    answer_choices: null
+    id: db701a1b-af5b-491c-b5d6-cd4419444639
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  e90eee3e-f2a4-4b9f-b5c3-9edc76c6e38c: !Template
+    answer_choices: null
+    id: e90eee3e-f2a4-4b9f-b5c3-9edc76c6e38c
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
diff --git a/promptsource/templates/scan/filler_num3/templates.yaml b/promptsource/templates/scan/filler_num3/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b05c52228e0c166ed7ab26f4fbe358db9b08239
--- /dev/null
+++ b/promptsource/templates/scan/filler_num3/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: filler_num3
+templates:
+  0154a526-fdda-4c75-be3b-995bbd6f4cf5: !Template
+    answer_choices: null
+    id: 0154a526-fdda-4c75-be3b-995bbd6f4cf5
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  16214e0a-53f4-44a6-bdea-a212598f054a: !Template
+    answer_choices: null
+    id: 16214e0a-53f4-44a6-bdea-a212598f054a
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  1aa8f46d-4859-4f53-8630-718332409ff8: !Template
+    answer_choices: null
+    id: 1aa8f46d-4859-4f53-8630-718332409ff8
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  51caf65b-b6ab-412a-9513-f9e20e727b99: !Template
+    answer_choices: null
+    id: 51caf65b-b6ab-412a-9513-f9e20e727b99
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  572d665c-cf6b-4302-a112-da225a83dced: !Template
+    answer_choices: null
+    id: 572d665c-cf6b-4302-a112-da225a83dced
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  648ffbee-8432-4ba8-ad86-42a15a21a201: !Template
+    answer_choices: null
+    id: 648ffbee-8432-4ba8-ad86-42a15a21a201
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  6ce15057-a2c0-489e-8c5b-1978a5692bab: !Template
+    answer_choices: null
+    id: 6ce15057-a2c0-489e-8c5b-1978a5692bab
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  80c18052-3a25-4b1d-a99e-37a0dd56793b: !Template
+    answer_choices: null
+    id: 80c18052-3a25-4b1d-a99e-37a0dd56793b
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  a619fe38-cc02-4d9b-ba8d-b9ab2edce5c8: !Template
+    answer_choices: null
+    id: a619fe38-cc02-4d9b-ba8d-b9ab2edce5c8
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  deffa981-1fa3-4b21-9bc1-6b87675c3aa6: !Template
+    answer_choices: null
+    id: deffa981-1fa3-4b21-9bc1-6b87675c3aa6
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  e5b8785b-06d5-4c44-af98-d6d856726b6e: !Template
+    answer_choices: null
+    id: e5b8785b-06d5-4c44-af98-d6d856726b6e
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  e61ce9e3-c9b8-417f-a2c3-2cd66cf74d5d: !Template
+    answer_choices: null
+    id: e61ce9e3-c9b8-417f-a2c3-2cd66cf74d5d
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
diff --git a/promptsource/templates/scan/length/templates.yaml b/promptsource/templates/scan/length/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33eb3ff3692e8b477b4fb4f8ed3d5483b7c133d3
--- /dev/null
+++ b/promptsource/templates/scan/length/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: length
+templates:
+  06b65e1e-3fa6-48ba-8f20-7b9f6d70611a: !Template
+    answer_choices: null
+    id: 06b65e1e-3fa6-48ba-8f20-7b9f6d70611a
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  087a45d9-99a8-4b49-8b72-bab00a7ccf96: !Template
+    answer_choices: null
+    id: 087a45d9-99a8-4b49-8b72-bab00a7ccf96
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  20c18eec-65a7-4de4-9cce-de266d0ada21: !Template
+    answer_choices: null
+    id: 20c18eec-65a7-4de4-9cce-de266d0ada21
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  42187f63-1354-43bb-adbd-79c0ee6e6c6d: !Template
+    answer_choices: null
+    id: 42187f63-1354-43bb-adbd-79c0ee6e6c6d
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  5d5957da-8837-4b6d-89b2-f8fbfef18aa2: !Template
+    answer_choices: null
+    id: 5d5957da-8837-4b6d-89b2-f8fbfef18aa2
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  70fed5f3-d452-4f8a-bfc9-b27e8054ad7e: !Template
+    answer_choices: null
+    id: 70fed5f3-d452-4f8a-bfc9-b27e8054ad7e
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  7f91a7ba-9faa-43ba-bd7a-bb76c53aa394: !Template
+    answer_choices: null
+    id: 7f91a7ba-9faa-43ba-bd7a-bb76c53aa394
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  8ab57301-3088-47b3-a20e-63b560663e6e: !Template
+    answer_choices: null
+    id: 8ab57301-3088-47b3-a20e-63b560663e6e
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  99a49ccf-19da-4697-a2bd-e4829f52695d: !Template
+    answer_choices: null
+    id: 99a49ccf-19da-4697-a2bd-e4829f52695d
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  b3e6b0df-2038-4b1f-a8b0-37ff3baf8dec: !Template
+    answer_choices: null
+    id: b3e6b0df-2038-4b1f-a8b0-37ff3baf8dec
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  e511b117-2551-45bc-a14f-b00a314890c1: !Template
+    answer_choices: null
+    id: e511b117-2551-45bc-a14f-b00a314890c1
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  ef055cf3-a989-4a2b-b453-b114e569b41b: !Template
+    answer_choices: null
+    id: ef055cf3-a989-4a2b-b453-b114e569b41b
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
diff --git a/promptsource/templates/scan/simple/templates.yaml b/promptsource/templates/scan/simple/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0bfed53bad724853b3aa540c1801428222d049b2
--- /dev/null
+++ b/promptsource/templates/scan/simple/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: simple
+templates:
+  04d8f808-3d03-4086-ac88-56f63c00a723: !Template
+    answer_choices: null
+    id: 04d8f808-3d03-4086-ac88-56f63c00a723
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  25007fa4-a6cb-4732-a780-1fab772ac051: !Template
+    answer_choices: null
+    id: 25007fa4-a6cb-4732-a780-1fab772ac051
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  2b56f443-6917-4e06-9c19-0968497c9f2c: !Template
+    answer_choices: null
+    id: 2b56f443-6917-4e06-9c19-0968497c9f2c
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  460ae7d4-cd96-4354-8e44-5fe0b62b4b4a: !Template
+    answer_choices: null
+    id: 460ae7d4-cd96-4354-8e44-5fe0b62b4b4a
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  560c4958-02cd-487d-a460-4648fd300b73: !Template
+    answer_choices: null
+    id: 560c4958-02cd-487d-a460-4648fd300b73
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  71cd97ad-3151-4bf2-8d51-6b3445e9fc02: !Template
+    answer_choices: null
+    id: 71cd97ad-3151-4bf2-8d51-6b3445e9fc02
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  760a66cc-00ae-4bb4-99fa-b0e3bb185722: !Template
+    answer_choices: null
+    id: 760a66cc-00ae-4bb4-99fa-b0e3bb185722
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  806248c1-7e92-4be7-91f2-eae0e94efdf2: !Template
+    answer_choices: null
+    id: 806248c1-7e92-4be7-91f2-eae0e94efdf2
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  8dcd7908-9912-412b-b293-c2c412832f41: !Template
+    answer_choices: null
+    id: 8dcd7908-9912-412b-b293-c2c412832f41
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  a3ed2876-edf9-4d1f-b39a-ef42d0e7d06f: !Template
+    answer_choices: null
+    id: a3ed2876-edf9-4d1f-b39a-ef42d0e7d06f
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  a4b5c0f1-11fb-436a-84a8-094d99962cf2: !Template
+    answer_choices: null
+    id: a4b5c0f1-11fb-436a-84a8-094d99962cf2
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  c6bcf1af-2f4d-4f46-b4a0-3b4a51ddb33a: !Template
+    answer_choices: null
+    id: c6bcf1af-2f4d-4f46-b4a0-3b4a51ddb33a
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
diff --git a/promptsource/templates/scan/template_around_right/templates.yaml b/promptsource/templates/scan/template_around_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0103609d1a398d5d8edd915058742a9862eba6e
--- /dev/null
+++ b/promptsource/templates/scan/template_around_right/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: template_around_right
+templates:
+  203e0677-6009-46c1-ab32-7ee5c05bce43: !Template
+    answer_choices: null
+    id: 203e0677-6009-46c1-ab32-7ee5c05bce43
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command into the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  23c479d0-5170-4d2b-bef4-15436fd79daf: !Template
+    answer_choices: null
+    id: 23c479d0-5170-4d2b-bef4-15436fd79daf
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  27a67c54-b283-41b5-b773-179e0a71e91b: !Template
+    answer_choices: null
+    id: 27a67c54-b283-41b5-b773-179e0a71e91b
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  352a41e2-1d15-4570-94af-0bcd6b3169a4: !Template
+    answer_choices: null
+    id: 352a41e2-1d15-4570-94af-0bcd6b3169a4
+    jinja: 'Please translate correctly the following commands in natural language
+      in the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  690254be-0121-42a4-b165-a47d17df1de8: !Template
+    answer_choices: null
+    id: 690254be-0121-42a4-b165-a47d17df1de8
+    jinja: 'Please correctly translate the following natural language commands
+      into the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  724aa06a-e716-4e20-9b5c-6edca247d02b: !Template
+    answer_choices: null
+    id: 724aa06a-e716-4e20-9b5c-6edca247d02b
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  923c6871-486a-4203-8529-43cb76cbe31b: !Template
+    answer_choices: null
+    id: 923c6871-486a-4203-8529-43cb76cbe31b
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  a2727d4a-bdce-4054-805f-52281d7a8a3d: !Template
+    answer_choices: null
+    id: a2727d4a-bdce-4054-805f-52281d7a8a3d
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  b5172bc9-7d87-432b-a62c-a0c09366bbbf: !Template
+    answer_choices: null
+    id: b5172bc9-7d87-432b-a62c-a0c09366bbbf
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  d3a97f7b-e2bd-4211-b4d2-7184693254b5: !Template
+    answer_choices: null
+    id: d3a97f7b-e2bd-4211-b4d2-7184693254b5
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  e0005a33-7992-49cf-93ff-0b10951cbb5f: !Template
+    answer_choices: null
+    id: e0005a33-7992-49cf-93ff-0b10951cbb5f
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  edab23c8-b883-42ba-ac2b-4e52e6ec072b: !Template
+    answer_choices: null
+    id: edab23c8-b883-42ba-ac2b-4e52e6ec072b
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
diff --git a/promptsource/templates/scan/template_jump_around_right/templates.yaml b/promptsource/templates/scan/template_jump_around_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0545b964af04491e77dae8ed40573d54088c2381
--- /dev/null
+++ b/promptsource/templates/scan/template_jump_around_right/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: template_jump_around_right
+templates:
+  0ebef2a6-98d6-427f-8e17-40469e09a123: !Template
+    answer_choices: null
+    id: 0ebef2a6-98d6-427f-8e17-40469e09a123
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  14c6b50f-61ee-4e59-b4c5-94a3b08aa71f: !Template
+    answer_choices: null
+    id: 14c6b50f-61ee-4e59-b4c5-94a3b08aa71f
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  37576bed-ad86-4fc5-8216-c3723a9230bb: !Template
+    answer_choices: null
+    id: 37576bed-ad86-4fc5-8216-c3723a9230bb
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  3ea6058b-7c5d-4961-aae5-c51bf0f3f3ce: !Template
+    answer_choices: null
+    id: 3ea6058b-7c5d-4961-aae5-c51bf0f3f3ce
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  40dace9c-9b7c-4532-90e8-c1dacab234e7: !Template
+    answer_choices: null
+    id: 40dace9c-9b7c-4532-90e8-c1dacab234e7
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  42697be5-c37e-4286-8f7f-d103f95c45e0: !Template
+    answer_choices: null
+    id: 42697be5-c37e-4286-8f7f-d103f95c45e0
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  4606956a-1ed4-49e4-b4a8-fb2c42384b84: !Template
+    answer_choices: null
+    id: 4606956a-1ed4-49e4-b4a8-fb2c42384b84
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  719d61da-8e76-42fe-892f-14aa7fe6c74a: !Template
+    answer_choices: null
+    id: 719d61da-8e76-42fe-892f-14aa7fe6c74a
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  7662bd89-0c24-4615-9c74-a8c96cebf6bb: !Template
+    answer_choices: null
+    id: 7662bd89-0c24-4615-9c74-a8c96cebf6bb
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  975bb40c-098f-47d7-9880-7cad69bd25e6: !Template
+    answer_choices: null
+    id: 975bb40c-098f-47d7-9880-7cad69bd25e6
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  aaf3642e-cc85-4b8b-8702-83bd82d6cabb: !Template
+    answer_choices: null
+    id: aaf3642e-cc85-4b8b-8702-83bd82d6cabb
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  e43626a9-53b4-4cac-a81d-6d40d482e31d: !Template
+    answer_choices: null
+    id: e43626a9-53b4-4cac-a81d-6d40d482e31d
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command to the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
diff --git a/promptsource/templates/scan/template_opposite_right/templates.yaml b/promptsource/templates/scan/template_opposite_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7beff60c981065e72feae93ec82f0352a42583f6
--- /dev/null
+++ b/promptsource/templates/scan/template_opposite_right/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: template_opposite_right
+templates:
+  0ee0c194-a7e2-45be-a28e-41b7e140b648: !Template
+    answer_choices: null
+    id: 0ee0c194-a7e2-45be-a28e-41b7e140b648
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
+  2fa5aaee-0149-480a-94ff-d1b04fe1fed9: !Template
+    answer_choices: null
+    id: 2fa5aaee-0149-480a-94ff-d1b04fe1fed9
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  66a0115e-bd4b-424c-ac47-95272ff11e77: !Template
+    answer_choices: null
+    id: 66a0115e-bd4b-424c-ac47-95272ff11e77
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command to the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  7342961f-2362-4a01-9a36-47d1e2c81781: !Template
+    answer_choices: null
+    id: 7342961f-2362-4a01-9a36-47d1e2c81781
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  836c4b28-dddb-4e02-9c65-3802272a48da: !Template
+    answer_choices: null
+    id: 836c4b28-dddb-4e02-9c65-3802272a48da
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  a4e58261-132a-41c2-93a8-b18cfb862860: !Template
+    answer_choices: null
+    id: a4e58261-132a-41c2-93a8-b18cfb862860
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  bc044fc3-9b52-47ce-a763-874b599264db: !Template
+    answer_choices: null
+    id: bc044fc3-9b52-47ce-a763-874b599264db
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  bd5ea3b7-f313-496d-81f3-730571569455: !Template
+    answer_choices: null
+    id: bd5ea3b7-f313-496d-81f3-730571569455
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  d1e094b1-7870-4c8e-aa2f-b0bfd127d9fa: !Template
+    answer_choices: null
+    id: d1e094b1-7870-4c8e-aa2f-b0bfd127d9fa
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  d79cad67-1ee4-43c2-9557-aacd17613524: !Template
+    answer_choices: null
+    id: d79cad67-1ee4-43c2-9557-aacd17613524
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  d7c8e8e2-c306-4242-bbe5-58dffb18d1aa: !Template
+    answer_choices: null
+    id: d7c8e8e2-c306-4242-bbe5-58dffb18d1aa
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  fcc3a325-5a96-4b59-b0d0-ec229db5a634: !Template
+    answer_choices: null
+    id: fcc3a325-5a96-4b59-b0d0-ec229db5a634
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
diff --git a/promptsource/templates/scan/template_right/templates.yaml b/promptsource/templates/scan/template_right/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b74ed754892a9956ff44a3b9618a3306e36b918
--- /dev/null
+++ b/promptsource/templates/scan/template_right/templates.yaml
@@ -0,0 +1,344 @@
+dataset: scan
+subset: template_right
+templates:
+  08e8efe7-1e88-4acd-8eca-2ae37aefe33e: !Template
+    answer_choices: null
+    id: 08e8efe7-1e88-4acd-8eca-2ae37aefe33e
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+
+      Hint:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      |||
+
+      {{ actions }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: translate_with_hint
+    reference: ''
+  0a4f0503-40e3-42ac-9b8b-36b1d9e54433: !Template
+    answer_choices: null
+    id: 0a4f0503-40e3-42ac-9b8b-36b1d9e54433
+    jinja: '{{ commands }}
+
+
+      Given the commands above, what is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom
+    reference: ''
+  144a6a8c-1404-4f3e-b18c-487e20adf9ee: !Template
+    answer_choices: null
+    id: 144a6a8c-1404-4f3e-b18c-487e20adf9ee
+    jinja: 'Given the commands: {{ commands }}
+
+
+      Produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_mix
+    reference: ''
+  2026f684-e30c-4260-9b56-843bd1b22d29: !Template
+    answer_choices: null
+    id: 2026f684-e30c-4260-9b56-843bd1b22d29
+    jinja: 'Given the following commands: {{ commands }}
+
+
+      What is the corresponding correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_mix
+    reference: ''
+  3a802640-ed81-427a-8ee5-7617667bc663: !Template
+    answer_choices: null
+    id: 3a802640-ed81-427a-8ee5-7617667bc663
+    jinja: 'Please correctly translate the following natural language commands into
+      the corresponding SCAN actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: translate
+    reference: ''
+  44c0b2cc-2bb1-448a-b8aa-a9fb95958710: !Template
+    answer_choices: null
+    id: 44c0b2cc-2bb1-448a-b8aa-a9fb95958710
+    jinja: 'Natural language commands: {{ commands }}
+
+
+
+      SCAN actions: |||{{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: plain
+    reference: ''
+  689d1086-e9e3-4cc9-b218-5475fe40c636: !Template
+    answer_choices: null
+    id: 689d1086-e9e3-4cc9-b218-5475fe40c636
+    jinja: 'Given the commands below, what is the corresponding correct sequence of
+      actions?
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_beginning
+    reference: ''
+  9c514c2c-fbc8-4bf7-b183-c7902d562869: !Template
+    answer_choices: null
+    id: 9c514c2c-fbc8-4bf7-b183-c7902d562869
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands-to-actions mapping and the commands above, produce the corresponding
+      correct sequence of actions.
+
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_top_with_hint
+    reference: ''
+  cafcded1-92d7-429f-ab8c-e14ee12fbfed: !Template
+    answer_choices: null
+    id: cafcded1-92d7-429f-ab8c-e14ee12fbfed
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+
+      Here is a hint on how to translate each command to the corresponding action:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: affirmative_bottom_with_hint
+    reference: ''
+  d077b844-8102-4100-a7e8-fdd6a239764f: !Template
+    answer_choices: null
+    id: d077b844-8102-4100-a7e8-fdd6a239764f
+    jinja: 'Mapping commands to actions:
+
+
+      {{ "walk: I_WALK"}}
+
+
+      {{ "run: I_RUN"}}
+
+
+      {{ "jump: I_JUMP"}}
+
+
+      {{ "look: I_LOOK"}}
+
+
+      {{ "turn left: I_TURN_LEFT"}}
+
+
+      {{ "turn right: I_TURN_RIGHT"}}
+
+
+      {{ "turn opposite left: I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn opposite right: I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      {{ "turn around left: I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT I_TURN_LEFT"}}
+
+
+      {{ "turn around right: I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT I_TURN_RIGHT"}}
+
+
+      Commands: {{ commands }}
+
+
+      Given the commands above and the commands-to-actions mapping, what is the corresponding
+      correct sequence of actions?
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: interrogative_bottom_with_hint
+    reference: ''
+  d3fb2d3a-08a2-4aa9-9b28-7457a46a8b99: !Template
+    answer_choices: null
+    id: d3fb2d3a-08a2-4aa9-9b28-7457a46a8b99
+    jinja: '{{ commands }}
+
+
+      Given the commands above, produce the corresponding correct sequence of actions.
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom
+    reference: ''
+  f98eecc8-8eac-4a85-8df1-ef6173e43a9e: !Template
+    answer_choices: null
+    id: f98eecc8-8eac-4a85-8df1-ef6173e43a9e
+    jinja: 'Given the commands below, please produce the corresponding correct sequence
+      of actions.
+
+
+      {{ commands }}
+
+      |||
+
+      {{ actions }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_beginning
+    reference: ''
diff --git a/promptsource/templates/scicite/templates.yaml b/promptsource/templates/scicite/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12fc16ec508aaf0077c30a22fa3ef6d5a54a0064
--- /dev/null
+++ b/promptsource/templates/scicite/templates.yaml
@@ -0,0 +1,117 @@
+dataset: scicite
+templates:
+  113a4e9e-7f59-4963-89a3-c1c647acaf2b: !Template
+    answer_choices: A ||| C ||| B
+    id: 113a4e9e-7f59-4963-89a3-c1c647acaf2b
+    jinja: 'Consider the following citation from a scientific paper:
+
+      {{ string }}
+
+      It came from a section titled:
+
+      {{sectionName}}.
+
+      Is this citation describing
+
+
+      A: a {{"method"}}
+
+
+      B: a {{"result"}}
+
+
+      C: {{"background"}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Classify intent w/section (select choice)
+    reference: ''
+  359418b3-2425-4968-b428-ecb5d60b3b4e: !Template
+    answer_choices: Method ||| Background ||| Result
+    id: 359418b3-2425-4968-b428-ecb5d60b3b4e
+    jinja: 'Is the following citation from a scientific paper describing a {{"method"}},
+      a {{"result"}}, or {{"background"}}?:
+
+      {{ string }}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Classify intent (choices first)
+    reference: ''
+  36a77a57-31e5-48d3-a9b8-e8b8db5fe334: !Template
+    answer_choices: A ||| C ||| B
+    id: 36a77a57-31e5-48d3-a9b8-e8b8db5fe334
+    jinja: 'Consider the following citation from a scientific paper:
+
+      {{ string }}
+
+      Is this citation describing
+
+
+      A: a {{"method"}}
+
+
+      B: a {{"result"}}
+
+
+      C: {{"background"}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Classify intent (select choice)
+    reference: ''
+  b917ab5b-3e33-48ee-a319-ccca6af58cd5: !Template
+    answer_choices: Method ||| Background ||| Result
+    id: b917ab5b-3e33-48ee-a319-ccca6af58cd5
+    jinja: 'Consider the following citation from a scientific paper:
+
+      {{ string }}
+
+      Is this citation describing a {{"method"}}, a {{"result"}}, or {{"background"}}?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Classify intent
+    reference: ''
+  f63606d8-7168-4201-a2bc-e48a442540ac: !Template
+    answer_choices: Method ||| Background ||| Result
+    id: f63606d8-7168-4201-a2bc-e48a442540ac
+    jinja: 'Consider the following citation from a scientific paper:
+
+      {{ string }}
+
+      It came from a section titled:
+
+      {{sectionName}}.
+
+      Is this citation describing a {{"method"}}, a {{"result"}}, or {{"background"}}?
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Classify intent w/section
+    reference: ''
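
For reference, promptsource splits the `answer_choices` string on `|||` and exposes the resulting list to the template, so `{{ answer_choices[label] }}` maps scicite's integer label directly to a verbalized answer. A sketch under that assumption, with a made-up citation string:

```python
# Split as promptsource does; per the choice order used in this file,
# label 0 = method, 1 = background, 2 = result.
answer_choices = [c.strip() for c in "Method ||| Background ||| Result".split("|||")]

# Hypothetical scicite record.
example = {"string": "...as shown in prior work (Doe et al., 2019).", "label": 0}

target = answer_choices[example["label"]]
assert target == "Method"
```
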
diff --git a/promptsource/templates/scientific_papers/arxiv/templates.yaml b/promptsource/templates/scientific_papers/arxiv/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f4c9fede47318225fb16e42331a49df3fca0ea3e
--- /dev/null
+++ b/promptsource/templates/scientific_papers/arxiv/templates.yaml
@@ -0,0 +1,81 @@
+dataset: scientific_papers
+subset: arxiv
+templates:
+  6f268cad-7bdd-4ca2-a647-18ac04d0d422: !Template
+    answer_choices: null
+    id: 6f268cad-7bdd-4ca2-a647-18ac04d0d422
+    jinja: 'Write the first line of an abstract of a paper which starts with: {{ article.strip().split(''\n'')[:3]|join(''\n'')
+      }}
+
+      |||
+
+      {{ abstract.strip().split(''\n'')[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_line_abstract_from_first_three_lines_article
+    reference: Given the first three lines of an article, write the first line of
+      the abstract.
+  a13472ad-df38-469a-85a8-a4e1ed58bc87: !Template
+    answer_choices: null
+    id: a13472ad-df38-469a-85a8-a4e1ed58bc87
+    jinja: 'What would be the first line of a scientific article for the following abstract:
+      {{ abstract }}
+
+      |||
+
+      {{ article.strip().split(''\n'')[0] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_line_from_abstract
+    reference: Given the abstract, generate the first line of the article.
+  ab3e65ab-0935-497c-a6d0-61ad31e5a1a7: !Template
+    answer_choices: null
+    id: ab3e65ab-0935-497c-a6d0-61ad31e5a1a7
+    jinja: 'Write the first 100 words for a scientific article with the following
+      abstract: {{ abstract }}
+
+      |||
+
+      {{ article.strip().split('' '')[:100] |join('' '')}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_100_words_for_article
+    reference: Given the abstract, generate the first 100 words of the article.
+  ed38e74f-2a5f-4b27-a1f9-0331c6ee8b29: !Template
+    answer_choices: null
+    id: ed38e74f-2a5f-4b27-a1f9-0331c6ee8b29
+    jinja: 'Generate the section names for a scientific article with the abstract:
+      {{ abstract }}
+
+      |||
+
+      {{ section_names.strip().split(''\n'')|join('', '')}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: get_section_names_from_abstract
+    reference: Given the abstract, get the section names.
+  fb501d80-9e93-4a7f-b66c-69b98ac0347f: !Template
+    answer_choices: null
+    id: fb501d80-9e93-4a7f-b66c-69b98ac0347f
+    jinja: 'Prepare the first three sentences of a scientific article for the following
+      abstract:
+
+      {{abstract}}
+
+      |||
+
+      {{article.strip().split(''\n'')[:3]|join("\n")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_three_sentences_of_an_article_from_abstract
+    reference: Given the abstract, generate the first three sentences of the article.
diff --git a/promptsource/templates/scientific_papers/pubmed/templates.yaml b/promptsource/templates/scientific_papers/pubmed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ae878482d89669fac2acdf9c8f93d163386b81b
--- /dev/null
+++ b/promptsource/templates/scientific_papers/pubmed/templates.yaml
@@ -0,0 +1,81 @@
+dataset: scientific_papers
+subset: pubmed
+templates:
+  7b54d0d8-ea64-4828-bb1c-a12fd3162c3f: !Template
+    answer_choices: null
+    id: 7b54d0d8-ea64-4828-bb1c-a12fd3162c3f
+    jinja: 'Generate the section names for a scientific article with the abstract:
+      {{ abstract }}
+
+      |||
+
+      {{ section_names.strip().split(''\n'')|join('', '')}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: get_section_names_from_abstract
+    reference: Given the abstract, get the section names.
+  7ef28be4-382c-46ee-9fd5-1c4d83aab433: !Template
+    answer_choices: null
+    id: 7ef28be4-382c-46ee-9fd5-1c4d83aab433
+    jinja: 'Prepare the first three sentences of a scientific article for the following
+      abstract:
+
+      {{abstract}}
+
+      |||
+
+      {{article.strip().split(''\n'')[:3]|join("\n")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_three_sentences_of_an_article_from_abstract
+    reference: Given the abstract, generate the first three sentences of the article.
+  a34723df-0c10-4553-8323-99c4cfb53544: !Template
+    answer_choices: null
+    id: a34723df-0c10-4553-8323-99c4cfb53544
+    jinja: 'What would be the first line of a scientific article for the following abstract:
+      {{ abstract }}
+
+      |||
+
+      {{ article.strip().split(''\n'')[0] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_line_from_abstract
+    reference: Given the abstract, generate the first line of the article.
+  a8992de1-443f-4f6a-983a-99e10a34b328: !Template
+    answer_choices: null
+    id: a8992de1-443f-4f6a-983a-99e10a34b328
+    jinja: 'Write the first 100 words for a scientific article with the following
+      abstract: {{ abstract }}
+
+      |||
+
+      {{ article.strip().split('' '')[:100] |join('' '')}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_100_words_for_article
+    reference: Given the abstract, generate the first 100 words of the article.
+  ff694788-2e84-49e7-8df3-2665b8c687f1: !Template
+    answer_choices: null
+    id: ff694788-2e84-49e7-8df3-2665b8c687f1
+    jinja: 'Write the first line of an abstract of a paper which starts with: {{ article.strip().split(''\n'')[:3]|join(''\n'')
+      }}
+
+      |||
+
+      {{ abstract.strip().split(''\n'')[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: generate_first_line_abstract_from_first_three_lines_article
+    reference: Given the first three lines of an article, write the first line of
+      the abstract.
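
The slicing expressions shared by the arxiv and pubmed templates are ordinary Python string operations lifted into Jinja; the same logic in plain Python (the article text is a stand-in):

```python
article = "Line one.\nLine two.\nLine three.\nLine four."

# {{ article.strip().split('\n')[0] }}
first_line = article.strip().split("\n")[0]

# {{ article.strip().split('\n')[:3]|join('\n') }}
first_three_lines = "\n".join(article.strip().split("\n")[:3])

# {{ article.strip().split(' ')[:100]|join(' ') }}
first_100_words = " ".join(article.strip().split(" ")[:100])
```
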
diff --git a/promptsource/templates/sciq/templates.yaml b/promptsource/templates/sciq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f81e7454eaa968f083ef31981e38e28d4a691564
--- /dev/null
+++ b/promptsource/templates/sciq/templates.yaml
@@ -0,0 +1,90 @@
+dataset: sciq
+templates:
+  0af52ad2-2b12-4700-b664-cd26d2da6dc1: !Template
+    answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+    id: 0af52ad2-2b12-4700-b664-cd26d2da6dc1
+    jinja: 'Q: {{question}}
+
+
+
+      A:|||{{answer_choices[3]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Direct Question (Closed Book)
+    reference: ''
+  15b0a989-84e4-4f1c-8ac1-12dbfa2ff42a: !Template
+    answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+    id: 15b0a989-84e4-4f1c-8ac1-12dbfa2ff42a
+    jinja: "{% set order = [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],\
+      \ [0, 3, 1, 2], [0, 3, 2, 1],\n                             [1, 0, 2, 3], [1,\
+      \ 0, 3, 2], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [1, 3, 2, 0],\n      \
+      \                       [2, 1, 0, 3], [2, 1, 3, 0], [2, 0, 1, 3], [2, 0, 3,\
+      \ 1], [2, 3, 1, 0], [2, 3, 0, 1],\n                             [3, 1, 2, 0],\
+      \ [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [3, 0, 1, 2], [3, 0, 2, 1]] | choice\
+      \ %}\nQ: {{question}}\n\n\n Choices:\n\n- {{ answer_choices[order[0]] }}\n\n\
+      - {{ answer_choices[order[1]] }}\n\n- {{ answer_choices[order[2]] }}\n\n- {{\
+      \ answer_choices[order[3]] }}\n\nA:|||{{answer_choices[3]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Multiple Choice (Closed Book)
+    reference: Same multiple choice format but without the support text
+  368e29fb-506d-4a4e-ac33-0af8d6e1729b: !Template
+    answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+    id: 368e29fb-506d-4a4e-ac33-0af8d6e1729b
+    jinja: "{% set order = [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],\
+      \ [0, 3, 1, 2], [0, 3, 2, 1],\n                             [1, 0, 2, 3], [1,\
+      \ 0, 3, 2], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [1, 3, 2, 0],\n      \
+      \                       [2, 1, 0, 3], [2, 1, 3, 0], [2, 0, 1, 3], [2, 0, 3,\
+      \ 1], [2, 3, 1, 0], [2, 3, 0, 1],\n                             [3, 1, 2, 0],\
+      \ [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [3, 0, 1, 2], [3, 0, 2, 1]] | choice\
+      \ %}\nQ: {{question}}\n\n\nRead this paragraph and choose the correct option\
+      \ from the provided answers:\n\n{{support}}\n\n Choices:\n\n- {{ answer_choices[order[0]]\
+      \ }}\n\n- {{ answer_choices[order[1]] }}\n\n- {{ answer_choices[order[2]] }}\n\
+      \n- {{ answer_choices[order[3]] }}\n\n\nA:|||{{answer_choices[3]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Multiple Choice Question First
+    reference: Multiple choice question format
+  63c22e8a-7029-4ce3-bd26-6ca6a1541563: !Template
+    answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+    id: 63c22e8a-7029-4ce3-bd26-6ca6a1541563
+    jinja: "{% set order = [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],\
+      \ [0, 3, 1, 2], [0, 3, 2, 1],\n                             [1, 0, 2, 3], [1,\
+      \ 0, 3, 2], [1, 2, 0, 3], [1, 2, 3, 0], [1, 3, 0, 2], [1, 3, 2, 0],\n      \
+      \                       [2, 1, 0, 3], [2, 1, 3, 0], [2, 0, 1, 3], [2, 0, 3,\
+      \ 1], [2, 3, 1, 0], [2, 3, 0, 1],\n                             [3, 1, 2, 0],\
+      \ [3, 1, 0, 2], [3, 2, 1, 0], [3, 2, 0, 1], [3, 0, 1, 2], [3, 0, 2, 1]] | choice\
+      \ %}\nAnswer the following question given this paragraph: \n\n{{support}}\n\n\
+      \nQ: {{question}}\n\n Choices:\n\n- {{ answer_choices[order[0]] }}\n\n- {{ answer_choices[order[1]]\
+      \ }}\n\n- {{ answer_choices[order[2]] }}\n\n- {{ answer_choices[order[3]] }}\n\
+      \nA:|||{{answer_choices[3]}}\n\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Multiple Choice
+    reference: Standard multiple choice format
+  d417fcfb-9f00-4186-95d8-e63609495164: !Template
+    answer_choices: '{{distractor1}} ||| {{distractor2}} ||| {{distractor3}} ||| {{correct_answer}}'
+    id: d417fcfb-9f00-4186-95d8-e63609495164
+    jinja: "Answer the following question given this paragraph: \n\n{{support}}\n\n\
+      \nQ: {{question}}\n\n\nA:|||{{answer_choices[3]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Direct Question
+    reference: ''
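
The shuffled multiple-choice templates above hard-code the 24 orderings of the four answer slots and draw one with the `choice` filter (which, as far as we understand promptsource's setup, wraps `random.choice`). The equivalent plain Python, with a hypothetical record:

```python
import itertools
import random

# The hand-written YAML list should be exactly the 24 permutations of
# the four answer-choice slots.
orders = [list(p) for p in itertools.permutations(range(4))]
assert len(orders) == 24

# Hypothetical sciq record: three distractors, then the correct answer,
# matching the answer_choices order used in this file.
answer_choices = ["xylem", "stomata", "chlorophyll", "photosynthesis"]

order = random.choice(orders)                  # what `|choice` does
shuffled = [answer_choices[i] for i in order]  # options as listed in the prompt
target = answer_choices[3]                     # correct answer, order-independent
```
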
diff --git a/promptsource/templates/scitail/snli_format/templates.yaml b/promptsource/templates/scitail/snli_format/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7cffcc8700beb09c9e32a9ebeadad33cb03388e7
--- /dev/null
+++ b/promptsource/templates/scitail/snli_format/templates.yaml
@@ -0,0 +1,25 @@
+dataset: scitail
+subset: snli_format
+templates:
+  90827988-2a8d-4ecb-b8c1-54ad6cd0ebfa: !Template
+    answer_choices: yes ||| no
+    id: 90827988-2a8d-4ecb-b8c1-54ad6cd0ebfa
+    jinja: 'Given that {{sentence1}} Does it follow that {{sentence2}}
+
+      {{ answer_choices | join('' or '') }}?
+
+      |||{% if gold_label == "entailment" %}
+
+      {{answer_choices[0]}}
+
+      {% else %}
+
+      {{answer_choices[1]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: Another Yes/No Entailment Framing
+    reference: ''
diff --git a/promptsource/templates/scitail/tsv_format/templates.yaml b/promptsource/templates/scitail/tsv_format/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1829c17118cb6b7e3f56fad32cbfa6d2bb61d26f
--- /dev/null
+++ b/promptsource/templates/scitail/tsv_format/templates.yaml
@@ -0,0 +1,67 @@
+dataset: scitail
+subset: tsv_format
+templates:
+  189ed384-c077-49ad-b606-ed08b66f8376: !Template
+    answer_choices: true ||| false
+    id: 189ed384-c077-49ad-b606-ed08b66f8376
+    jinja: "{{premise}} Therefore, we are licensed to say that {{hypothesis}}  {{\
+      \ answer_choices | join(' or ') }}|||\n{% if label == \"entails\" %} \n{{answer_choices[0]}}\n\
+      {% else %}\n{{answer_choices[1]}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "\u2026 Therefore, we're licensed to say that\u2026"
+    reference: ''
+  1ff92b02-fefc-49e0-b676-9391fab8f193: !Template
+    answer_choices: neutral ||| entails
+    id: 1ff92b02-fefc-49e0-b676-9391fab8f193
+    jinja: Suppose {{premise}} Can we infer that {{hypothesis}}? ||| {{label}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "Suppose\u2026 Can we infer that\u2026"
+    reference: ''
+  5aa53544-73a6-4486-b8c8-623345353fa7: !Template
+    answer_choices: yes ||| no
+    id: 5aa53544-73a6-4486-b8c8-623345353fa7
+    jinja: "{{premise}} Does the previous passage support the claim that {{hypothesis}}?\
+      \ |||{% if label == \"entails\" %} \n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "\u2026does the previous passage support the claim that"
+    reference: ''
+  705fa099-0650-4de5-b72f-881aea0fa208: !Template
+    answer_choices: yes ||| no
+    id: 705fa099-0650-4de5-b72f-881aea0fa208
+    jinja: "Given that {{premise}} Does it follow that {{hypothesis}}  {{ answer_choices\
+      \ | join(' or ') }} |||\n{% if label == \"entails\" %} \n{{answer_choices[0]}}\n\
+      {% else %}\n{{answer_choices[1]}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "given\u2026 does it follow that\u2026 "
+    reference: Another yes/no entailment framing
+  9aa89dee-6cef-43bc-bdf4-e38cdf0796a6: !Template
+    answer_choices: yes ||| no
+    id: 9aa89dee-6cef-43bc-bdf4-e38cdf0796a6
+    jinja: "Sentence 1: {{premise}}\n\nSentence 2: {{hypothesis}}\n\nQuestion: Does\
+      \ Sentence 1 entail Sentence 2?  {{ answer_choices | join(' or ') }} |||\n{%\
+      \ if label == \"entails\" %} \n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does S1 entail S2?
+    reference: Adapted from Victor's prompts for XNLI.
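
All five templates above branch on the string label in the same way; schematically, with a hypothetical record:

```python
answer_choices = ["yes", "no"]

label = "entails"  # tsv_format labels are the strings "entails" / "neutral"
target = answer_choices[0] if label == "entails" else answer_choices[1]
assert target == "yes"
```
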
diff --git a/promptsource/templates/scitldr/Abstract/templates.yaml b/promptsource/templates/scitldr/Abstract/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..21ed3d4910c7682aa5b52c7f6f9e6c6904a7152c
--- /dev/null
+++ b/promptsource/templates/scitldr/Abstract/templates.yaml
@@ -0,0 +1,61 @@
+dataset: scitldr
+subset: Abstract
+templates:
+  01fb91ab-2c95-436e-9363-3dfcdb6c5ba6: !Template
+    answer_choices: null
+    id: 01fb91ab-2c95-436e-9363-3dfcdb6c5ba6
+    jinja: "Generate a summary for the text: \n{{source | join(\" \")}}\n|||\n{{target[0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic - task description like
+    reference: Assume there is only one choice
+  08b9e913-a305-46e2-aa43-f1126d76cf55: !Template
+    answer_choices: null
+    id: 08b9e913-a305-46e2-aa43-f1126d76cf55
+    jinja: "Elaborate on the given summary: \n{{target |choice}}\n\nStart with following\
+      \ sentence: {{source[0]}}\n|||\n{{source | join(\" \")}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: reverse generation
+    reference: This template asks the model to hallucinate the abstract.
+  16faf5c0-a0c5-488a-89dd-2989622b01dc: !Template
+    answer_choices: null
+    id: 16faf5c0-a0c5-488a-89dd-2989622b01dc
+    jinja: "Compress the abstract to one or two sentences. Make sure it captures the\
+      \ main point of the abstract. \nAbstract: {{source | join(\" \")}}\nSummary:\
+      \ \n|||\n{{target[0]}}\n\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Instructions for summary
+    reference: Providing instructions on what a summary should look like
+  ab46a8f2-1e57-4ac9-b4ae-422c70689450: !Template
+    answer_choices: null
+    id: ab46a8f2-1e57-4ac9-b4ae-422c70689450
+    jinja: '{{source| join(" ")}}
+
+      TL;DR: ||| {{target[0]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: GPT 2 style
+    reference: GPT 2 style template
+  bac2ebcf-a54d-49a0-ac37-e7ad3f4878cb: !Template
+    answer_choices: null
+    id: bac2ebcf-a54d-49a0-ac37-e7ad3f4878cb
+    jinja: "{{source | join(\" \")}}\nPlease summarize the above paragraph. \n|||\n\
+      {{target|choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic with choice
+    reference: basic task like description with choice filter
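
scitldr stores `source` and `target` as lists (abstract sentences and one or more reference TLDRs), which is why the templates above join `source` and either take `target[0]` or sample with `|choice`. In plain Python, with a made-up record:

```python
import random

source = ["We propose method X.", "It improves Y on benchmark Z."]
target = ["X improves Y on Z.", "A simple method X beats Y on Z."]

prompt_text = " ".join(source)        # {{ source | join(" ") }}
gold_first = target[0]                # {{ target[0] }}: assume one reference
gold_sampled = random.choice(target)  # {{ target | choice }}: sample a reference
```
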
diff --git a/promptsource/templates/selqa/answer_selection_analysis/templates.yaml b/promptsource/templates/selqa/answer_selection_analysis/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33e5e7a12c021ddbdeb46a2fb3184978208fd27c
--- /dev/null
+++ b/promptsource/templates/selqa/answer_selection_analysis/templates.yaml
@@ -0,0 +1,75 @@
+dataset: selqa
+subset: answer_selection_analysis
+templates:
+  39f5f57c-50b9-40b3-bb4f-3f0e4fec7776: !Template
+    answer_choices: null
+    id: 39f5f57c-50b9-40b3-bb4f-3f0e4fec7776
+    jinja: '{% set rand_index = range(0,10)|choice %} He asked me "{{ question }}"
+      Is he talking about {{ ["MUSIC", "TV","TRAVEL","ART","SPORT","COUNTRY","MOVIES","HISTORICAL
+      EVENTS","SCIENCE","FOOD"][rand_index]|lower}}? ||| {% if topic == rand_index
+      %}Yes{% else %}No{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: is-he-talking-about
+    reference: ''
+  5354e98d-8aa2-49d0-a50b-fc72a503d7d4: !Template
+    answer_choices: null
+    id: 5354e98d-8aa2-49d0-a50b-fc72a503d7d4
+    jinja: '{% set rand_index = range(0,candidates|length)|choice %} Would it make
+      sense to reply "{{ candidates[rand_index]|trim|trim(''.'') }}" to the question
+      "{{ question }}"? ||| {% if rand_index in answers %}Yes{%else %}No{%endif%}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: would-make-sense-qu-rand
+    reference: ''
+  721463cf-bae4-4a22-bd19-7bdbb0777856: !Template
+    answer_choices: null
+    id: 721463cf-bae4-4a22-bd19-7bdbb0777856
+    jinja: '{% set rand_index = range(0,10)|choice %}{% set rand_index = rand_index
+      - 1 if rand_index == topic else rand_index %}{% set topics = ["MUSIC", "TV","TRAVEL","ART","SPORT","COUNTRY","MOVIES","HISTORICAL
+      EVENTS","SCIENCE","FOOD"]%} What is the topic of the question "{{ question }}"?
+      Is it {{ topics[rand_index]|lower}} or {{ topics[topic]|lower}}? ||| {{ topics[topic]|lower
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: about-topic-vs-random
+    reference: ''
+  9de0a553-63e7-4b67-a6c5-1a15ac0d5483: !Template
+    answer_choices: null
+    id: 9de0a553-63e7-4b67-a6c5-1a15ac0d5483
+    jinja: 'Someone asked me "{{ question }}" I replied "{{ candidates[0] }}" Does
+      my answer make sense? ||| {% if 0 in answers %}Yes{%else %}No{%endif%}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: make-sense-0
+    reference: ''
+  c2be1297-cfce-48bd-9ef0-9f46fc898e84: !Template
+    answer_choices: null
+    id: c2be1297-cfce-48bd-9ef0-9f46fc898e84
+    jinja: "{% set rand_val = range(0,candidates|length)|choice %}{% set rand_index\
+      \ = namespace(value=rand_val)%}\n{% for answer in answers|sort(reverse=True)%}\n\
+      \  {% if rand_index.value == answer %}\n  {% set rand_index.value = rand_index.value\
+      \ - 1 %}\n{% endif %}\n{% endfor %}\n{% set response=\"2\" %}{% set real_fake_answers\
+      \ = [candidates[rand_index.value], candidates[answers[0]]] %}\n{% if range(0,2)|choice\
+      \ %}{% set response=\"1\" %}{% set real_fake_answers = [candidates[answers[0]],\
+      \ candidates[rand_index.value]] %}{% endif %}\nThe next question was \"{{ question\
+      \ }}\" Which is the correct answer? 1: \"{{ real_fake_answers|join('\" or 2:\
+      \ \"') }} ||| {{ response }}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: which-answer-1st-vs-random
+    reference: ''
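
The `which-answer-1st-vs-random` template above is the trickiest in this file: it draws a random candidate index, walks the gold-answer indices in descending order and decrements the draw on each collision so it lands on a distractor, then coin-flips which slot holds the gold answer. A sketch of the same logic in plain Python (the record is hypothetical):

```python
import random

# Hypothetical selqa record: candidate answers plus indices of correct ones.
candidates = ["Jazz began in New Orleans.", "Jazz is a card game.",
              "Jazz often uses swing rhythm."]
answers = [0]  # index 0 is the (only) correct answer here

# Mirror of the namespace loop: step off gold indices, high to low.
rand_index = random.randrange(len(candidates))
for a in sorted(answers, reverse=True):
    if rand_index == a:
        rand_index -= 1

# Coin-flip the presentation order; `response` is the target string.
if random.random() < 0.5:
    pair, response = [candidates[answers[0]], candidates[rand_index]], "1"
else:
    pair, response = [candidates[rand_index], candidates[answers[0]]], "2"
```
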
diff --git a/promptsource/templates/sem_eval_2010_task_8/templates.yaml b/promptsource/templates/sem_eval_2010_task_8/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b472a230481dbb06ddd1b8185ebbed63255318da
--- /dev/null
+++ b/promptsource/templates/sem_eval_2010_task_8/templates.yaml
@@ -0,0 +1,128 @@
+dataset: sem_eval_2010_task_8
+templates:
+  202246b0-3f82-42b9-bc8d-d36997b5f2cb: !Template
+    answer_choices: null
+    id: 202246b0-3f82-42b9-bc8d-d36997b5f2cb
+    jinja: "'Given the sentence, {{sentence}}\n\nOut of the options, {{\"Cause Effect\
+      \ e1,e2\"}}, {{\"Cause Effect e2,e1\"}},{{\"Component Whole e1,e2\"}},{{\"Component\
+      \ Whole e2,e1\"}},{{\"Content Container e1,e2\"}},{{\"Content Container e2,e1\"\
+      }},{{\"Entity Destination e1,e2\"}},{{\"Entity Destination e2,e1\"}},{{\"Entity\
+      \ Origin e1,e2\"}},{{\"Entity Origin e2,e1\"}},{{\"Instrument Agency e1,e2\"\
+      }},{{\"Instrument Agency e2,e1\"}},{{\"Member Collection e1,e2\"}},{{\"Member\
+      \ Collection e2,e1\"}},{{\"Message Topic e1,e2\"}},{{\"Message Topic e2,e1\"\
+      }},{{\"Product Producer e1,e2\"}},{{\"Product Producer e2,e1\"}} and {{\"Other\"\
+      }}, \n\nWhat is the semantic relation between the two nominals e1,e2 or e2,e1\
+      \ in the sentence: ||| {{ [\"Cause Effect e1,e2\", \"Cause Effect e2,e1\",\"\
+      Component Whole e1,e2\",\"Component Whole e2,e1\",\"Content Container e1,e2\"\
+      ,\"Content Container e2,e1\",\"Entity Destination e1,e2\",\"Entity Destination\
+      \ e2,e1\",\"Entity Origin e1,e2\",\"Entity Origin e2,e1\",\"Instrument Agency\
+      \ e1,e2\",\"Instrument Agency e2,e1\",\"Member Collection e1,e2\",\"Member Collection\
+      \ e2,e1\",\"Message Topic e1,e2\",\"Message Topic e2,e1\",\"Product Producer\
+      \ e1,e2\",\"Product Producer e2,e1\"\n,\"Other\"] [relation] }}'"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_2
+    reference: out of options
+  5d7123a8-4ed4-42ce-bcfb-4af415962efc: !Template
+    answer_choices: null
+    id: 5d7123a8-4ed4-42ce-bcfb-4af415962efc
+    jinja: 'How semantically related are the two nominals in the sentence, {{sentence}}
+
+
+      Please answer {{"Cause Effect e1,e2"}}, {{"Cause Effect e2,e1"}},{{"Component
+      Whole e1,e2"}},{{"Component Whole e2,e1"}},{{"Content Container e1,e2"}},{{"Content
+      Container e2,e1"}},{{"Entity Destination e1,e2"}},{{"Entity Destination e2,e1"}},{{"Entity
+      Origin e1,e2"}},{{"Entity Origin e2,e1"}},{{"Instrument Agency e1,e2"}},{{"Instrument
+      Agency e2,e1"}},{{"Member Collection e1,e2"}},{{"Member Collection e2,e1"}},{{"Message
+      Topic e1,e2"}},{{"Message Topic e2,e1"}},{{"Product Producer e1,e2"}},{{"Product
+      Producer e2,e1"}} and {{"Other"}}: ||| {{ ["Cause Effect e1,e2", "Cause Effect
+      e2,e1","Component Whole e1,e2","Component Whole e2,e1","Content Container e1,e2","Content
+      Container e2,e1","Entity Destination e1,e2","Entity Destination e2,e1","Entity
+      Origin e1,e2","Entity Origin e2,e1","Instrument Agency e1,e2","Instrument Agency
+      e2,e1","Member Collection e1,e2","Member Collection e2,e1","Message Topic e1,e2","Message
+      Topic e2,e1","Product Producer e1,e2","Product Producer e2,e1"
+
+      ,"Other"] [relation] }}'''
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_3
+    reference: please answer
+  87ca690e-87a7-44d5-b001-a4181482f5c9: !Template
+    answer_choices: null
+    id: 87ca690e-87a7-44d5-b001-a4181482f5c9
+    jinja: 'Given the two nominals (e1, e2) in {{sentence}}
+
+
+      ===
+
+
+      What is the semantic relation between e1 and e2: ||| {{ ["Cause Effect e1,e2",
+      "Cause Effect e2,e1","Component Whole e1,e2","Component Whole e2,e1","Content
+      Container e1,e2","Content Container e2,e1","Entity Destination e1,e2","Entity
+      Destination e2,e1","Entity Origin e1,e2","Entity Origin e2,e1","Instrument Agency
+      e1,e2","Instrument Agency e2,e1","Member Collection e1,e2","Member Collection
+      e2,e1","Message Topic e1,e2","Message Topic e2,e1","Product Producer e1,e2","Product
+      Producer e2,e1"
+
+      ,"Other"] [relation] }}'''
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_0
+    reference: 'relationship between two nominals'
+  c97e7bbf-b7f0-4cee-ada5-431ce7d606cc: !Template
+    answer_choices: null
+    id: c97e7bbf-b7f0-4cee-ada5-431ce7d606cc
+    jinja: 'Given the sentence, {{sentence}}
+
+
+      ===
+
+
+      What is the semantic relation between the two nominals e1,e2 or e2,e1 in the
+      sentence: ||| {{ ["Cause Effect e1,e2", "Cause Effect e2,e1","Component Whole
+      e1,e2","Component Whole e2,e1","Content Container e1,e2","Content Container
+      e2,e1","Entity Destination e1,e2","Entity Destination e2,e1","Entity Origin
+      e1,e2","Entity Origin e2,e1","Instrument Agency e1,e2","Instrument Agency e2,e1","Member
+      Collection e1,e2","Member Collection e2,e1","Message Topic e1,e2","Message Topic
+      e2,e1","Product Producer e1,e2","Product Producer e2,e1"
+
+      ,"Other"] [relation] }}'''
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_1
+    reference: mention e1,e2 after
+  d7e88599-da89-4cfd-94e2-65e68c7ef141: !Template
+    answer_choices: null
+    id: d7e88599-da89-4cfd-94e2-65e68c7ef141
+    jinja: 'Sentence: {{sentence}}
+
+
+      Are the two nominals in the sentence semantically related as {{"Cause Effect
+      e1,e2"}}, {{"Cause Effect e2,e1"}},{{"Component Whole e1,e2"}},{{"Component
+      Whole e2,e1"}},{{"Content Container e1,e2"}},{{"Content Container e2,e1"}},{{"Entity
+      Destination e1,e2"}},{{"Entity Destination e2,e1"}},{{"Entity Origin e1,e2"}},{{"Entity
+      Origin e2,e1"}},{{"Instrument Agency e1,e2"}},{{"Instrument Agency e2,e1"}},{{"Member
+      Collection e1,e2"}},{{"Member Collection e2,e1"}},{{"Message Topic e1,e2"}},{{"Message
+      Topic e2,e1"}},{{"Product Producer e1,e2"}},{{"Product Producer e2,e1"}} and
+      {{"Other"}}: ||| {{ ["Cause Effect e1,e2", "Cause Effect e2,e1","Component Whole
+      e1,e2","Component Whole e2,e1","Content Container e1,e2","Content Container
+      e2,e1","Entity Destination e1,e2","Entity Destination e2,e1","Entity Origin
+      e1,e2","Entity Origin e2,e1","Instrument Agency e1,e2","Instrument Agency e2,e1","Member
+      Collection e1,e2","Member Collection e2,e1","Message Topic e1,e2","Message Topic
+      e2,e1","Product Producer e1,e2","Product Producer e2,e1"
+
+      ,"Other"] [relation] }}'''
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_4
+    reference: given nominals
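
Each template in this file ends by indexing a fixed, ordered list of the 18 directed relation labels plus "Other" with the integer `relation` field; schematically (list abbreviated):

```python
relations = [
    "Cause Effect e1,e2", "Cause Effect e2,e1",
    "Component Whole e1,e2", "Component Whole e2,e1",
    # ... the remaining 14 directed relations, in the same order ...
    "Other",
]

relation = 1  # hypothetical integer label from the dataset
target = relations[relation]  # -> "Cause Effect e2,e1"
```
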
diff --git a/promptsource/templates/sem_eval_2014_task_1/templates.yaml b/promptsource/templates/sem_eval_2014_task_1/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44cca4c69bc45560d760d1ac2959fb7e54bf707c
--- /dev/null
+++ b/promptsource/templates/sem_eval_2014_task_1/templates.yaml
@@ -0,0 +1,88 @@
+dataset: sem_eval_2014_task_1
+templates:
+  14b0f0c7-0026-466f-8d9e-9dc6c32bf111: !Template
+    answer_choices: No clear answer ||| yes ||| no
+    id: 14b0f0c7-0026-466f-8d9e-9dc6c32bf111
+    jinja: 'Does the premise: "{{premise}}" agree with the hypothesis: "{{hypothesis}}"?
+      ||| {{answer_choices[entailment_judgment]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment_basic_3
+    reference: ''
+  2aa091cb-02ff-4c8c-964c-4c5e53df8c1b: !Template
+    answer_choices: null
+    id: 2aa091cb-02ff-4c8c-964c-4c5e53df8c1b
+    jinja: "How related are the two sentences : \"{{hypothesis}}\" and \"{{premise}}\"\
+      \ ? Rate it from 1-5. \n||| {{(((10*relatedness_score)|round)/10)}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: relatedness_basic_2
+    reference: ''
+  75203dd2-5ec3-4e91-b95f-228ad9bd2010: !Template
+    answer_choices: neither ||| entailing ||| contradicting
+    id: 75203dd2-5ec3-4e91-b95f-228ad9bd2010
+    jinja: "Sentence 1: \"{{hypothesis}}\" \nSentence 2: \"{{premise}}\"\nAre the\
+      \ two sentences {{\"entailing\"}} or {{\"contradicting\"}} each other?\n|||\
+      \ {{answer_choices[entailment_judgment]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment_basic_2
+    reference: ''
+  892c58fd-64f5-4059-8fb8-c74bc025ff40: !Template
+    answer_choices: Neutral ||| Entailment ||| Contradiction
+    id: 892c58fd-64f5-4059-8fb8-c74bc025ff40
+    jinja: "Given the following hypothesis: {{hypothesis}}.\nAs well as the premise:\
+      \ {{premise}}, \nPredict the Entailment relation between the premise and hypothesis\
+      \ from the labels {{\"Neutral\"}}, {{\"Entailment\"}}, {{ \"Contradiction\"\
+      }} |||\n {{answer_choices[entailment_judgment]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment_basic_1
+    reference: ''
+  91a6b1db-be59-41bd-9eea-73bb7a4e7350: !Template
+    answer_choices: neither entails nor contradicts ||| entails ||| contradicts
+    id: 91a6b1db-be59-41bd-9eea-73bb7a4e7350
+    jinja: 'Given the hypothesis: {{hypothesis}} and the premise: {{premise}}, out
+      of the options {{"neither entails nor contradicts"}}, {{"entails"}} and {{"contradicts"}},
+      the hypothesis ||| {{answer_choices[entailment_judgment]}}
+      the premise.
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment_localization_1
+    reference: ''
+  a58fe8b4-f185-46a9-8fca-6dc66d0812be: !Template
+    answer_choices: null
+    id: a58fe8b4-f185-46a9-8fca-6dc66d0812be
+    jinja: "Given the following hypothesis: {{hypothesis}}.\nAs well as the premise:\
+      \ {{premise}}, \nGive a score on how related the hypothesis and premise was,\
+      \ from the scale 1 to 5, where  1 is completely unrelated and 5 is very related:\
+      \ |||   {{(((10*relatedness_score)|round)/10)}}\n\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: relatedness_basic_1
+    reference: ''
+  d9380ec0-18b3-48b2-99eb-9f9cb47ab7c7: !Template
+    answer_choices: unclear ||| yes ||| no
+    id: d9380ec0-18b3-48b2-99eb-9f9cb47ab7c7
+    jinja: Does {{premise}} imply that {{hypothesis}}? Please answer yes, no, or
+      unclear. ||| {{answer_choices[entailment_judgment]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: entailment_basic_4
+    reference: ''
diff --git a/promptsource/templates/sent_comp/templates.yaml b/promptsource/templates/sent_comp/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9374d0b2abb920fa804ca978c4a7cf8e0ecdbfdb
--- /dev/null
+++ b/promptsource/templates/sent_comp/templates.yaml
@@ -0,0 +1,94 @@
+dataset: sent_comp
+templates:
+  185b5001-19e3-47d3-afd3-40f74346f4bb: !Template
+    answer_choices: null
+    id: 185b5001-19e3-47d3-afd3-40f74346f4bb
+    jinja: '{{graph.sentence}}
+
+
+      ===
+
+
+      Given the above sentence, generate a compressed sentence: ||| {{compression.text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_0
+    reference: generate compression
+  336ba469-f315-49ff-8c02-baf6d059972b: !Template
+    answer_choices: null
+    id: 336ba469-f315-49ff-8c02-baf6d059972b
+    jinja: '{{headline}}
+
+
+      ===
+
+
+      Given the above headline, write one compressed sentence: ||| {{compression.text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_6
+    reference: write compression given headline
+  6493cbf3-bce9-4556-92ab-ec815f768eb6: !Template
+    answer_choices: null
+    id: 6493cbf3-bce9-4556-92ab-ec815f768eb6
+    jinja: 'Sentence: {{graph.sentence}}
+
+
+      Compressed sentence: ||| {{compression.text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_1
+    reference: 'compressed sentence, sentence'
+  9391497d-4fd1-4977-aba8-dc20f9e9445a: !Template
+    answer_choices: null
+    id: 9391497d-4fd1-4977-aba8-dc20f9e9445a
+    jinja: '{{graph.sentence}}
+
+
+      Extreme TL;DR: ||| {{compression.text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_4
+    reference: extreme TLDR
+  b7b2934c-cf3e-42b9-b7be-d6f1af679bce: !Template
+    answer_choices: null
+    id: b7b2934c-cf3e-42b9-b7be-d6f1af679bce
+    jinja: 'Compress this headline: {{headline}}
+
+
+      Compressed sentence: ||| {{compression.text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template_5
+    reference: from headline to compressed version
+  ca70c220-b9d8-46fa-8d83-3b9ba9e177c0: !Template
+    answer_choices: null
+    id: ca70c220-b9d8-46fa-8d83-3b9ba9e177c0
+    jinja: "'{{graph.sentence}}\n ===\n Given the above sentence, write one compressed\
+      \ sentence to summarize: ||| {{compression.text}}'"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_2
+    reference: write one compressed sentence
+  f797c3f9-2a93-46a6-8c84-ba4871eba79b: !Template
+    answer_choices: null
+    id: f797c3f9-2a93-46a6-8c84-ba4871eba79b
+    jinja: "'Compress: {{graph.sentence}}|||\n\n  {{compression.text}}'"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template_3
+    reference: compress
diff --git a/promptsource/templates/sick/templates.yaml b/promptsource/templates/sick/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9de8630285cdf7be1a6791b1ef2d2f8931910b03
--- /dev/null
+++ b/promptsource/templates/sick/templates.yaml
@@ -0,0 +1,82 @@
+dataset: sick
+templates:
+  2b5fcfdc-8dc4-4aed-9819-8a104230d0fa: !Template
+    answer_choices: null
+    id: 2b5fcfdc-8dc4-4aed-9819-8a104230d0fa
+    jinja: 'How related are the following sentences?
+
+      Give a score on a scale of 1 to 5.
+
+
+      {{sentence_A}}
+
+
+      {{sentence_B}} |||
+
+
+      {{relatedness_score}}
+
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: related
+    reference: ''
+  566db154-818a-43c6-b66d-924a20fbbec2: !Template
+    answer_choices: null
+    id: 566db154-818a-43c6-b66d-924a20fbbec2
+    jinja: "Does sentence B entail or contradict sentence A?\n\nsentence A: {{sentence_A}}\n\
+      \nsentence B: {{sentence_B}} |||\n{{\n[\n  \"entailment\",\n  \"neutral\",\n\
+      \  \"contradiction\"\n][label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: entailed
+    reference: ''
+  a502cdc1-3bf0-4019-8b4c-b293d75a95ff: !Template
+    answer_choices: null
+    id: a502cdc1-3bf0-4019-8b4c-b293d75a95ff
+    jinja: "Does sentence B, \"{{sentence_B}}\", support sentence A, \"{{sentence_A}}\"\
+      ? |||\n{{\n[\n  \"yes, sentence B supports sentence A\",\n  \"neutral\",\n\
+      \  \"no, sentence B contradicts sentence A\"\n][label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: support
+    reference: ''
+  c65e4a05-3252-4f3b-a10f-4fedeb9a61bb: !Template
+    answer_choices: null
+    id: c65e4a05-3252-4f3b-a10f-4fedeb9a61bb
+    jinja: "Does the hypothesis \"{{sentence_B}}\" follow or contradict  the premise\
+      \ \"{{sentence_A}}\"? |||\n{{\n[\n  \"yes, the hypothesis follows the premise\"\
+      ,\n  \"the hypothesis can either follow or contradict the premise\",\n  \"no,\
+      \ the hypothesis contradicts the premise\"\n][label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hypothesis
+    reference: 'Definition of neutral: https://www.aclweb.org/anthology/C18-1199.pdf'
+  eb368955-dc71-45cb-95fe-8d3fee0da819: !Template
+    answer_choices: null
+    id: eb368955-dc71-45cb-95fe-8d3fee0da819
+    jinja: 'What is the entailment relation between sentences A and B?
+
+
+      {{sentence_A}}
+
+
+      {{sentence_B}} |||
+
+      entailment for the A-B order is {{entailment_AB}} while entailment for the
+      B-A order is {{entailment_BA}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: entailment_AB
+    reference: ''
diff --git a/promptsource/templates/sms_spam/templates.yaml b/promptsource/templates/sms_spam/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68f1fb1c1b71f2a23619bee87b9091ddbdfdc75b
--- /dev/null
+++ b/promptsource/templates/sms_spam/templates.yaml
@@ -0,0 +1,60 @@
+dataset: sms_spam
+templates:
+  7bab221f-92fc-46b4-8c02-d5f401185f7e: !Template
+    answer_choices: ham ||| spam
+    id: 7bab221f-92fc-46b4-8c02-d5f401185f7e
+    jinja: "What is the label for the following sms message ? {{sms}} \n|||\n{{ answer_choices\
+      \ [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sms_spam_2
+    reference: ''
+  84cdb14c-f129-461c-83cf-a0a48af3d2ce: !Template
+    answer_choices: null
+    id: 84cdb14c-f129-461c-83cf-a0a48af3d2ce
+    jinja: "Is this sms message considered {{\"ham\"}} ? \n{{sms}}\n|||\n{% if 1-label\
+      \ %}\nTrue\n{% else %}\nFalse\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sms_spam_5
+    reference: ''
+  871415d2-552d-4798-a319-613c3c86d290: !Template
+    answer_choices: ham ||| spam
+    id: 871415d2-552d-4798-a319-613c3c86d290
+    jinja: 'Is the label for the following sms message {{"ham"}} or {{"spam"}} ? {{sms}}
+
+      |||
+
+      {{ answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sms_spam_1
+    reference: ''
+  a38996db-6f24-4412-ab78-fb9265bedd66: !Template
+    answer_choices: ham ||| spam
+    id: a38996db-6f24-4412-ab78-fb9265bedd66
+    jinja: "The following sms message delivers what label ? \n|||\n{{ answer_choices\
+      \ [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sms_spam_3
+    reference: ''
+  ef8c84e0-d45d-4e5d-b5e2-6ee3a94ce330: !Template
+    answer_choices: null
+    id: ef8c84e0-d45d-4e5d-b5e2-6ee3a94ce330
+    jinja: "Is this sms message considered {{\"spam\"}} ? \n{{sms}}\n|||\n{% if label\
+      \ %}\nTrue\n{% else %}\nFalse\n{% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sms_spam_4
+    reference: ''
diff --git a/promptsource/templates/snips_built_in_intents/templates.yaml b/promptsource/templates/snips_built_in_intents/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a5de9b357dd3f757cde31e6cda5a9d23778f1ea
--- /dev/null
+++ b/promptsource/templates/snips_built_in_intents/templates.yaml
@@ -0,0 +1,143 @@
+dataset: snips_built_in_intents
+templates:
+  05b95953-c659-4b51-8abc-6a170db93658: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: 05b95953-c659-4b51-8abc-6a170db93658
+    jinja: "Here is a voice command by a user of a voice assistant: \n\n{{text}}.\n\
+      \nIt can be summarized by one of the following options: \n\n{{\"Compare Places\"\
+      }}, {{\"Request Ride\"}}, {{\"Get Weather\"}}, {{\"Search Place\"}}, {{\"Get\
+      \ Place Details\"}}, {{\"Share Current Location\"}}, {{\"Get Traffic Information\"\
+      }}, {{\"Book Restaurant\"}}, {{\"Get Directions\"}}, {{\"Share ETA\"}}. \n\n\
+      Which one would that be?\n\n|||\n\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: voice_intent
+    reference: ''
+  069cc4e0-b76e-46be-a592-68a2323e41ea: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: 069cc4e0-b76e-46be-a592-68a2323e41ea
+    jinja: 'Humans can ask questions or make requests related to one of the following
+      categories: {{"Compare Places"}}, {{"Request Ride"}}, {{"Get Weather"}}, {{"Search
+      Place"}}, {{"Get Place Details"}}, {{"Share Current Location"}}, {{"Get Traffic
+      Information"}}, {{"Book Restaurant"}}, {{"Get Directions"}}, {{"Share ETA"}}
+
+
+      What is the best category for the following request? - "{{text}}"
+
+
+      |||
+
+
+      {{answer_choices[label]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: categorize_query
+    reference: ''
+  0952c001-39fe-4d02-9c3f-54d44bac7694: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: 0952c001-39fe-4d02-9c3f-54d44bac7694
+    jinja: "{{\"Compare Places\"}}, {{\"Request Ride\"}}, {{\"Get Weather\"}}, {{\"\
+      Search Place\"}}, {{\"Get Place Details\"}}, {{\"Share Current Location\"}},\
+      \ {{\"Get Traffic Information\"}}, {{\"Book Restaurant\"}}, {{\"Get Directions\"\
+      }} and {{\"Share ETA\"}} are possible user intents.\n\nWhich of the above options\
+      \ best captures the intent of the following user message? \n\n{{text}}\n\n|||\n\
+      \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: intent_query
+    reference: ''
+  2a1037cc-a807-493d-bffe-5493c73a425b: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: 2a1037cc-a807-493d-bffe-5493c73a425b
+    jinja: 'Map this query "{{text}}" to one of these categories - {{"Compare Places"}},
+      {{"Request Ride"}}, {{"Get Weather"}}, {{"Search Place"}}, {{"Get Place Details"}},
+      {{"Share Current Location"}}, {{"Get Traffic Information"}}, {{"Book Restaurant"}},
+      {{"Get Directions"}}, {{"Share ETA"}}
+
+
+      |||
+
+
+      {{answer_choices[label]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: categorize_query_brief
+    reference: ''
+  7dea1614-2d7f-4fee-a7a8-35f9ea12c411: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: 7dea1614-2d7f-4fee-a7a8-35f9ea12c411
+    jinja: "\"{{text}}\" is a message from a user.\n\nWhich of the following options\
+      \ best captures the intent of the user message written above? \n\n{{\"Compare\
+      \ Places\"}}, {{\"Request Ride\"}}, {{\"Get Weather\"}}, {{\"Search Place\"\
+      }}, {{\"Get Place Details\"}}, {{\"Share Current Location\"}}, {{\"Get Traffic\
+      \ Information\"}}, {{\"Book Restaurant\"}}, {{\"Get Directions\"}}, {{\"Share\
+      \ ETA\"}}\n\n|||\n\n{{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: query_intent
+    reference: ''
+  9e262c90-a8b7-40d3-896f-74dee67516a7: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: 9e262c90-a8b7-40d3-896f-74dee67516a7
+    jinja: "\"{{text}}\"\n\nThis message from a user can be summarized by one of these\
+      \ options - {{\"Compare Places\"}}, {{\"Request Ride\"}}, {{\"Get Weather\"\
+      }}, {{\"Search Place\"}}, {{\"Get Place Details\"}}, {{\"Share Current Location\"\
+      }}, {{\"Get Traffic Information\"}}, {{\"Book Restaurant\"}}, {{\"Get Directions\"\
+      }}, {{\"Share ETA\"}}. \n\nWhat would be best option?\n\n|||\n\n\n{{answer_choices[label]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: query_summarize
+    reference: ''
+  b987cb60-6b37-454e-a07e-dfe8b06a70c3: !Template
+    answer_choices: Compare Places ||| Request Ride ||| Get Weather ||| Search Place
+      ||| Get Place Details ||| Share Current Location ||| Get Traffic Information
+      ||| Book Restaurant ||| Get Directions ||| Share ETA
+    id: b987cb60-6b37-454e-a07e-dfe8b06a70c3
+    jinja: '"{{text}}"
+
+
+      Is the best category for the query above {{"Compare Places"}}? Or is it {{"Request
+      Ride"}}? Other category options include {{"Get Weather"}}, {{"Search Place"}},
+      {{"Get Place Details"}}, {{"Share Current Location"}}, {{"Get Traffic Information"}},
+      {{"Book Restaurant"}}, {{"Get Directions"}} and {{"Share ETA"}}. What is the
+      best answer?
+
+
+      |||
+
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: query_options_or
+    reference: ''
diff --git a/promptsource/templates/snli/templates.yaml b/promptsource/templates/snli/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c71322811458614484b4a3adb94b8d9648dcdd2a
--- /dev/null
+++ b/promptsource/templates/snli/templates.yaml
@@ -0,0 +1,87 @@
+dataset: snli
+templates:
+  4b15006d-48de-49e9-970c-6c4206fd4216: !Template
+    answer_choices: True ||| Neither ||| False
+    id: 4b15006d-48de-49e9-970c-6c4206fd4216
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True, False, or Neither? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: GPT-3 style
+    reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+      are no task-identifying tokens like "anli R1: ".'
+  5f0ef86f-2b8f-4a0c-8bca-a8b8d9ac015e: !Template
+    answer_choices: Yes ||| Neutral ||| No
+    id: 5f0ef86f-2b8f-4a0c-8bca-a8b8d9ac015e
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 entail Sentence 2? Yes, No, or Neutral? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: does S1 entail S2?
+    reference: Copied from Victor's prompts for XNLI.
+  68afa6d2-547d-4c2f-bcac-71507b1fe778: !Template
+    answer_choices: must be true ||| might be true ||| must be false
+    id: 68afa6d2-547d-4c2f-bcac-71507b1fe778
+    jinja: Given that {{premise}}, it {{"must be true, might be true, or must be false"}}
+      that {{hypothesis}}? ||| It {{ answer_choices[label] }}.
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 it must be true that\u2026"
+    reference: 'Maybe a little verbose for a generative model, but anecdotally this
+      is the most natural way of how I say an NLI sentence pair out loud to humans.
+      Caveat: NLI annotations are not meant to be strictly truth-conditional entailment,
+      so "must" is not ideal.'
+  94c87dc3-865e-4321-a696-bbc5a54d7096: !Template
+    answer_choices: No ||| Neutral ||| Yes
+    id: 94c87dc3-865e-4321-a696-bbc5a54d7096
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 contradict Sentence 2? Yes, No, or Neutral? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: does S1 contradict S2?
+    reference: Copied from Victor's prompts for XNLI.
+  b6abcb2e-a0b0-4046-810c-d27f316b1bd5: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: b6abcb2e-a0b0-4046-810c-d27f316b1bd5
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+      ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 does it follow that\u2026 "
+    reference: "\"Does it follow that\" could be replaced with \"can we infer that\u2026\
+      \ \", \"is it guaranteed that\u2026\", etc. Ideally there should be a question\
+      \ mark after \"does it follow that {hypothesis}?\", but the hypothesis string\
+      \ often comes with ending punctuations of its own."
+  f227d882-7838-4c7a-93fc-c2b68d2464fb: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: f227d882-7838-4c7a-93fc-c2b68d2464fb
+    jinja: '{{premise}} Based on the previous passage, is it true that {{hypothesis}}
+      Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
diff --git a/promptsource/templates/social_i_qa/templates.yaml b/promptsource/templates/social_i_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f294ba7dc8f20d0e48cf613869315bf1d8e10d8f
--- /dev/null
+++ b/promptsource/templates/social_i_qa/templates.yaml
@@ -0,0 +1,133 @@
+dataset: social_i_qa
+templates:
+  605691e9-df59-415d-a622-530734c7df38: !Template
+    answer_choices: '{{answerA}} ||| {{answerB}} ||| {{answerC}}'
+    id: 605691e9-df59-415d-a622-530734c7df38
+    jinja: 'I heard that {{context}}
+
+
+      And I was wondering {{question}}
+
+
+      |||
+
+
+      {{answer_choices[label | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: I was wondering
+    reference: ''
+  666f415b-e3ac-47bf-a79b-19024c4a4143: !Template
+    answer_choices: '{{answerA}} ||| {{answerB}} ||| {{answerC}}'
+    id: 666f415b-e3ac-47bf-a79b-19024c4a4143
+    jinja: '{{context}}
+
+
+      Given the context: {{question}}
+
+
+      Possible answers: {{answer_choices | join(", ")}}
+
+
+      |||
+
+
+      {{answer_choices[label | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Show choices and generate answer
+    reference: ''
+  991f78cc-82d3-482f-b1de-f37a7179a316: !Template
+    answer_choices: Yes ||| No
+    id: 991f78cc-82d3-482f-b1de-f37a7179a316
+    jinja: "{% set random_answer_id = range(0,2) | choice%}\n{% set answers = [answerA,\
+      \ answerB, answerC] %}\n{{context}}\n\nGiven the question \"{{question}}\",\
+      \ is \"{{answers[random_answer_id]}}\" a valid answer?\n\n|||\n\n{% if (label\
+      \ | int) - 1 == random_answer_id %}\n    Yes\n{% else %}\n    No\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Check if a random answer is valid or not
+    reference: ''
+  b980667e-b4ca-44ce-aba9-5b47d3ccf406: !Template
+    answer_choices: null
+    id: b980667e-b4ca-44ce-aba9-5b47d3ccf406
+    jinja: '{{context}}
+
+
+      Given that the answer to a question is "{{{"1": answerA, "2": answerB, "3":
+      answerC}[label]}}", what is the question?
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Generate the question from the answer
+    reference: ''
+  cbad777f-5794-4d71-bf3d-54da6043e5f1: !Template
+    answer_choices: '{{answerA}} ||| {{answerB}} ||| {{answerC}}'
+    id: cbad777f-5794-4d71-bf3d-54da6043e5f1
+    jinja: '{{context}}
+
+
+      Given the context: {{question}}
+
+
+      |||
+
+
+      {{answer_choices[label | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Generate answer
+    reference: ''
+  e2316120-2461-4664-943d-962a85008e23: !Template
+    answer_choices: A ||| B ||| C
+    id: e2316120-2461-4664-943d-962a85008e23
+    jinja: 'Context: {{context}}
+
+
+      Question: {{question}}
+
+
+      Which one of these answers best answers the question according to the context?
+
+
+      A: {{answerA}}
+
+
+      B: {{answerB}}
+
+
+      C: {{answerC}}
+
+
+      |||
+
+
+      {{{"1": "A", "2": "B", "3": "C"}[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Show choices and generate index
+    reference: ''
diff --git a/promptsource/templates/species_800/templates.yaml b/promptsource/templates/species_800/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc83e9fa6938c9c4832bc8b7022313861b2bcfa1
--- /dev/null
+++ b/promptsource/templates/species_800/templates.yaml
@@ -0,0 +1,341 @@
+dataset: species_800
+templates:
+  28b2d2b9-e844-423b-a9e8-f87cc0fc5b9e: !Template
+    answer_choices: null
+    id: 28b2d2b9-e844-423b-a9e8-f87cc0fc5b9e
+    jinja: "Given the sentence below with tokens separated with the character || ,\
+      \ identify the tokens that are species or organisms of the NCBI Taxonomy (acronyms,\
+      \ common names, abbreviations and scientific names of the species in the NCBI\
+      \ Taxonomy. E.g., Escherichia coli, E. coli). \nIndicate for each token in the\
+      \ sentence {{\"\\\"NCBI Taxonomy token\\\"\"}} if it is an NCBI Taxonomy token,\
+      \ else {{\"\\\"None\\\"\"}} if it is not an NCBI Taxonomy token. Please separate\
+      \ each token with the character || as in the original sentence.\n\nSentence:\
+      \ {{ tokens | join(\" || \")}}\n\nTokens:\n|||\n{% set new_list = [] %}\n{%\
+      \ for ner_tag in ner_tags %}\n{% if ner_tag > 0 %}\n{{ new_list.append(\"NCBI\
+      \ Taxonomy token\")|default(\"\", True) }}\n{% elif ner_tag <= 0%}\n{{ new_list.append(\"\
+      None\")|default(\"\", True) }}\n{% endif %}\n{% endfor %}\n{{ new_list | join(\"\
+      \ || \") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom_list
+    reference: ''
+  3eaaba97-b0b7-4c00-b3ff-a82e6462c21d: !Template
+    answer_choices: null
+    id: 3eaaba97-b0b7-4c00-b3ff-a82e6462c21d
+    jinja: 'Please write the species of the NCBI Taxonomy mentioned in the text below
+      (acronyms, common names, abbreviations, and scientific names of the species
+      in the NCBI Taxonomy. E.g., Escherichia coli, E. coli). If there is no species,
+      answer "None"; if there are multiple species, separate them with a comma.
+
+
+      Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+      )", ")")}}
+
+      |||
+
+      {% set diseases = {"list": [], "disease_started": False} %}
+
+      {% set disease_token = ""  %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+      {% set disease_token = tokens[loop.index - 1]  %}
+
+      {{ diseases.list.append(" ") |default("", True)}}
+
+      {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+      1 else disease_token) |default("", True)}}
+
+      {% elif diseases.disease_started %}
+
+      {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+      {{ diseases.list.append(",") |default("", True)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{diseases.list | join  | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+      )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_top_string
+    reference: ''
+  456329f5-1fd5-4b02-ab35-3c3ec2b731f0: !Template
+    answer_choices: null
+    id: 456329f5-1fd5-4b02-ab35-3c3ec2b731f0
+    jinja: 'Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",")
+      | replace(" ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("(
+      ", "(") | replace(" )", ")")}}
+
+
+      Given the text above, please write the species of the NCBI Taxonomy mentioned
+      in it (acronyms, common names, abbreviations, and scientific names of the species
+      in the NCBI Taxonomy. E.g., Escherichia coli, E. coli). If there is no species,
+      answer "None"; if there are multiple species, separate them with a comma.
+
+      |||
+
+      {% set diseases = {"list": [], "disease_started": False} %}
+
+      {% set disease_token = ""  %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+      {% set disease_token = tokens[loop.index - 1]  %}
+
+      {{ diseases.list.append(" ") |default("", True)}}
+
+      {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+      1 else disease_token) |default("", True)}}
+
+      {% elif diseases.disease_started %}
+
+      {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+      {{ diseases.list.append(",") |default("", True)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{diseases.list | join  | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+      )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_bottom_string
+    reference: ''
+  52218e73-f4fe-4181-8af3-d60ec0e0ffd1: !Template
+    answer_choices: null
+    id: 52218e73-f4fe-4181-8af3-d60ec0e0ffd1
+    jinja: 'Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",")
+      | replace(" ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("(
+      ", "(") | replace(" )", ")")}}
+
+
+      Given the text above, can you write all the species of the NCBI Taxonomy mentioned
+      in it?
+
+      An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+      name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+      If there is no species answer "None", if there are more species separate them
+      with a comma.
+
+      |||
+
+      {% set diseases = {"list": [], "disease_started": False} %}
+
+      {% set disease_token = ""  %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+      {% set disease_token = tokens[loop.index - 1]  %}
+
+      {{ diseases.list.append(" ") |default("", True)}}
+
+      {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+      1 else disease_token) |default("", True)}}
+
+      {% elif diseases.disease_started %}
+
+      {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+      {{ diseases.list.append(",") |default("", True)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{diseases.list | join  | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+      )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom_string
+    reference: ''
+  b6de15a6-61d0-49cc-a31c-0529d54a3ae2: !Template
+    answer_choices: null
+    id: b6de15a6-61d0-49cc-a31c-0529d54a3ae2
+    jinja: 'Given the sentence below with tokens separated with the character || ,
+      can you identify the tokens that are species or organisms of the NCBI Taxonomy?
+
+      An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+      name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+      Please indicate for each token in the sentence {{"\"NCBI Taxonomy token\""}}
+      if it is an NCBI Taxonomy token, else {{"\"None\""}} if it is not an NCBI Taxonomy
+      token. Separate each token with the character || as in the original sentence.
+
+
+      Sentence: {{ tokens | join(" || ")}}
+
+      |||
+
+      {% set new_list = [] %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ new_list.append("NCBI Taxonomy token")|default("", True) }}
+
+      {% elif ner_tag <= 0%}
+
+      {{ new_list.append("None")|default("", True) }}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{ new_list | join(" || ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_bottom_list
+    reference: ''
+  ce8619cb-ee0a-47b1-93d3-acd32d8ffa6f: !Template
+    answer_choices: null
+    id: ce8619cb-ee0a-47b1-93d3-acd32d8ffa6f
+    jinja: "Sentence: {{ tokens | join(\" || \")}}\n\nGiven the sentence above with\
+      \ tokens separated with the character || , identify the tokens that are species\
+      \ or organisms of the NCBI Taxonomy (acronyms, common names, abbreviations,\
+      \ and scientific names of the species in the NCBI Taxonomy. E.g., Escherichia\
+      \ coli, E. coli). \nIndicate for each token in the sentence {{\"\\\"NCBI Taxonomy\
+      \ token\\\"\"}} if it is an NCBI Taxonomy token, else {{\"\\\"None\\\"\"}} if\
+      \ it is not an NCBI Taxonomy token. Please separate each token with the character\
+      \ || as in the original sentence.\n\n|||\n{% set new_list = [] %}\n{% for ner_tag\
+      \ in ner_tags %}\n{% if ner_tag > 0 %}\n{{ new_list.append(\"NCBI Taxonomy token\"\
+      )|default(\"\", True) }}\n{% elif ner_tag <= 0%}\n{{ new_list.append(\"None\"\
+      )|default(\"\", True) }}\n{% endif %}\n{% endfor %}\n{{ new_list | join(\" ||\
+      \ \") }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: affirmative_top_list
+    reference: ''
+  cfc9b46c-7268-4449-85c4-cc08c3d5aaa9: !Template
+    answer_choices: null
+    id: cfc9b46c-7268-4449-85c4-cc08c3d5aaa9
+    jinja: 'Given the text below, can you write all the species of the NCBI Taxonomy
+      mentioned in it?
+
+
+      An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+      name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+      If there is no species answer "None", if there are more species separate them
+      with a comma.
+
+
+      Text: {{ tokens | join(" ") | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+      )", ")")}}
+
+      |||
+
+      {% set diseases = {"list": [], "disease_started": False} %}
+
+      {% set disease_token = ""  %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ diseases.update({"disease_started": True}) |default("", True)}}
+
+      {% set disease_token = tokens[loop.index - 1]  %}
+
+      {{ diseases.list.append(" ") |default("", True)}}
+
+      {{ diseases.list.append((disease_token[0]) + disease_token[1:] if ner_tag ==
+      1 else disease_token) |default("", True)}}
+
+      {% elif diseases.disease_started %}
+
+      {{ diseases.update({"disease_started": False}) |default("", True)}}
+
+      {{ diseases.list.append(",") |default("", True)}}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{diseases.list | join  | replace(" .", ".") | replace(" ,", ",") | replace("
+      ;", ";") | replace(" :", ":") | replace(" - ", "-") | replace("( ", "(") | replace("
+      )", ")") | trim(",") if (diseases.list | length) > 0 else "None"}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_top_string
+    reference: ''
+  dd0270f2-77e7-4d31-a806-85ba02934180: !Template
+    answer_choices: null
+    id: dd0270f2-77e7-4d31-a806-85ba02934180
+    jinja: 'Sentence: {{ tokens | join(" || ")}}
+
+
+      Given the sentence above with tokens separated with the character || , can you
+      identify the tokens that are species or organisms of the NCBI Taxonomy?
+
+      An NCBI Taxonomy token can be an acronym, common name, abbreviation, or scientific
+      name of a species in the NCBI Taxonomy (E.g., Escherichia coli, E. coli).
+
+
+      Please indicate for each token in the sentence {{"\"NCBI Taxonomy token\""}}
+      if it is an NCBI Taxonomy token, else {{"\"None\""}} if it is not an NCBI Taxonomy
+      token. Separate each token with the character || as in the original sentence.
+
+
+      |||
+
+      {% set new_list = [] %}
+
+      {% for ner_tag in ner_tags %}
+
+      {% if ner_tag > 0 %}
+
+      {{ new_list.append("NCBI Taxonomy token")|default("", True) }}
+
+      {% elif ner_tag <= 0%}
+
+      {{ new_list.append("None")|default("", True) }}
+
+      {% endif %}
+
+      {% endfor %}
+
+      {{ new_list | join(" || ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: interrogative_top_list
+    reference: ''
diff --git a/promptsource/templates/spider/templates.yaml b/promptsource/templates/spider/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a92186edd267b27b43ae3ad709d9e145b3817fec
--- /dev/null
+++ b/promptsource/templates/spider/templates.yaml
@@ -0,0 +1,105 @@
+dataset: spider
+templates:
+  058ae5d8-cd6b-44d6-ab36-bbe462b5fa1a: !Template
+    answer_choices: null
+    id: 058ae5d8-cd6b-44d6-ab36-bbe462b5fa1a
+    jinja: '"{{query}}" allows me to lookup ||| {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: SQL2text_2
+    reference: ''
+  0f475e13-201f-47e6-824d-728c9296e9c5: !Template
+    answer_choices: null
+    id: 0f475e13-201f-47e6-824d-728c9296e9c5
+    jinja: How would you ask someone to get the information returned by the SQL
+      query "{{query}}"? ||| {{question}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: SQL2text_4
+    reference: ''
+  1c7edb93-dd92-4e20-a584-e26f0c1976ab: !Template
+    answer_choices: null
+    id: 1c7edb93-dd92-4e20-a584-e26f0c1976ab
+    jinja: The question associated with the SQL query "{{query}}" is ||| {{question}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: SQL2text_3
+    reference: ''
+  363c960a-ef61-44e3-84e0-70a016ff9fac: !Template
+    answer_choices: null
+    id: 363c960a-ef61-44e3-84e0-70a016ff9fac
+    jinja: The natural language translation of the SQL query "{{query}}" is ||| {{question}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: SQL2text_5
+    reference: ''
+  4f63f872-06e2-4503-a440-c1908d5cb642: !Template
+    answer_choices: null
+    id: 4f63f872-06e2-4503-a440-c1908d5cb642
+    jinja: Can you give me the SQL request which corresponds to "{{question}}" ? |||
+      {{query}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: text2SQL_1
+    reference: ''
+  b4168ce2-4854-4c09-a5aa-08cbcb073ab0: !Template
+    answer_choices: null
+    id: b4168ce2-4854-4c09-a5aa-08cbcb073ab0
+    jinja: How would you query a SQL database to answer "{{question}}" ? ||| {{query}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: text2SQL_4
+    reference: ''
+  ba6415e9-9467-4519-8789-57d65524464e: !Template
+    answer_choices: null
+    id: ba6415e9-9467-4519-8789-57d65524464e
+    jinja: The SQL query associated with the question "{{question}}" is ||| {{query}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: text2SQL_3
+    reference: ''
+  cb6182b5-719b-4296-a2e5-783caa661cc2: !Template
+    answer_choices: null
+    id: cb6182b5-719b-4296-a2e5-783caa661cc2
+    jinja: Can you give me the question which corresponds to "{{query}}" ? ||| {{question}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: SQL2text_1
+    reference: ''
+  e843ba1e-09af-48ba-92ca-1b6032404985: !Template
+    answer_choices: null
+    id: e843ba1e-09af-48ba-92ca-1b6032404985
+    jinja: 'To lookup "{{question}}", I can use the SQL query: ||| {{query}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: text2SQL_2
+    reference: ''
+  fe685d52-5e7d-40c5-9945-44aa01058d0f: !Template
+    answer_choices: null
+    id: fe685d52-5e7d-40c5-9945-44aa01058d0f
+    jinja: The SQL translation of the natural language question "{{question}}" is |||
+      {{query}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: text2SQL_5
+    reference: ''
diff --git a/promptsource/templates/squad/templates.yaml b/promptsource/templates/squad/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9f9ac01c3ba52f1d57161200cbeb2a3f63dddf9b
--- /dev/null
+++ b/promptsource/templates/squad/templates.yaml
@@ -0,0 +1,220 @@
+dataset: squad
+templates:
+  264f9165-0eed-4855-8825-6508443c64b9: !Template
+    answer_choices: null
+    id: 264f9165-0eed-4855-8825-6508443c64b9
+    jinja: 'I know that the answer to "{{question}}" appears somewhere in the following
+      text snippet. Can you tell me at which character the answer starts?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: find
+    reference: ''
+  35a1e619-8515-4fee-aeb0-04f2fb7ec584: !Template
+    answer_choices: null
+    id: 35a1e619-8515-4fee-aeb0-04f2fb7ec584
+    jinja: 'At what character does the text "{{answers["text"][0]}}" start in the
+      following paragraph?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: find text
+    reference: ''
+  3c9c3a3c-da8c-42aa-a605-3063122c32eb: !Template
+    answer_choices: null
+    id: 3c9c3a3c-da8c-42aa-a605-3063122c32eb
+    jinja: '{{question}}
+
+
+      |||
+
+
+      {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: cbqa
+    reference: ''
+  46908111-143b-4ec2-9326-7959739569d8: !Template
+    answer_choices: null
+    id: 46908111-143b-4ec2-9326-7959739569d8
+    jinja: 'I''m creating a final exam for my reading class. Can you please come up
+      with a good question to quiz how well students have read the following text
+      snippet?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: exam creation help
+    reference: ''
+  5c908ab4-3f07-4f7a-90c2-b3624a96f3e0: !Template
+    answer_choices: null
+    id: 5c908ab4-3f07-4f7a-90c2-b3624a96f3e0
+    jinja: 'I''m working on the final exam for my class and am trying to figure out
+      the answer to the question "{{question}}" I found the following info on Wikipedia
+      and I think it has the answer. Can you tell me the answer?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: exam
+    reference: ''
+  7f09d6a1-f4b8-4303-bd76-5e570f2178c1: !Template
+    answer_choices: null
+    id: 7f09d6a1-f4b8-4303-bd76-5e570f2178c1
+    jinja: 'Count the characters up until "{{answers["text"][0]}}" appears in the
+      following chunk of text.
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: count letters
+    reference: ''
+  ad3b868c-5242-4c98-988e-bbe2709ebe6d: !Template
+    answer_choices: null
+    id: ad3b868c-5242-4c98-988e-bbe2709ebe6d
+    jinja: "Q: {{question}}\n\nA: \n|||\n{{answers[\"text\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: cbqa qa
+    reference: ''
+  c64ac353-f065-4691-b20c-444fb2bb87bf: !Template
+    answer_choices: null
+    id: c64ac353-f065-4691-b20c-444fb2bb87bf
+    jinja: '{{["Question", "Problem"]  | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+      Hint: {{context}}
+
+
+      |||
+
+
+      {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: question/hint
+    reference: ''
+  d74d53b5-a3b9-42ae-8753-9dff3fba1c69: !Template
+    answer_choices: null
+    id: d74d53b5-a3b9-42ae-8753-9dff3fba1c69
+    jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: after
+    reference: ''
+  e575a717-37a5-40c2-a255-af104f485c31: !Template
+    answer_choices: null
+    id: e575a717-37a5-40c2-a255-af104f485c31
+    jinja: 'Please come up with a good question to test reading comprehension about
+      the following paragraph:
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate question
+    reference: ''
+  f3d502bf-d44e-4edf-a27e-d469ba5afbd4: !Template
+    answer_choices: null
+    id: f3d502bf-d44e-4edf-a27e-d469ba5afbd4
+    jinja: "Question: {{question}}\n\nAnswer: \n|||\n{{answers[\"text\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: cbqa question answer
+    reference: ''
+  ff80522d-1a10-44c5-998a-d3c9840c4ef8: !Template
+    answer_choices: null
+    id: ff80522d-1a10-44c5-998a-d3c9840c4ef8
+    jinja: 'I''ve always wondered: {{question}}
+
+
+      I searched Wikipedia and this is what I found. What''s the answer?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: wondered
+    reference: ''
diff --git a/promptsource/templates/squad_adversarial/AddSent/templates.yaml b/promptsource/templates/squad_adversarial/AddSent/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..337d5a98144fa171ba8ea30690fc91fbf362791f
--- /dev/null
+++ b/promptsource/templates/squad_adversarial/AddSent/templates.yaml
@@ -0,0 +1,286 @@
+dataset: squad_adversarial
+subset: AddSent
+templates:
+  048c2159-2c8c-40e2-90f7-18c9623381ba: !Template
+    answer_choices: null
+    id: 048c2159-2c8c-40e2-90f7-18c9623381ba
+    jinja: 'Generate a possible question for the following short passage:
+
+
+      {{context}} |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: possible_qn
+    reference: ''
+  08fb6eac-6321-4b25-8578-14a799a103ed: !Template
+    answer_choices: null
+    id: 08fb6eac-6321-4b25-8578-14a799a103ed
+    jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: after
+    reference: ''
+  1f2c2108-441a-4b3c-a5c8-8ece28edb6e1: !Template
+    answer_choices: null
+    id: 1f2c2108-441a-4b3c-a5c8-8ece28edb6e1
+    jinja: 'At what character does the text "{{answers["text"][0]}}" start in the
+      following paragraph?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find text
+    reference: ''
+  279e4019-8d67-498d-8832-a7905bc0c68d: !Template
+    answer_choices: null
+    id: 279e4019-8d67-498d-8832-a7905bc0c68d
+    jinja: 'Use the following non-answers to generate a possible short passage-question
+      pair:
+
+      {{answers["text"]|join('', '')}} |||
+
+      {{context}}
+
+      {{question}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: answers_question
+    reference: ''
+  44df6bac-bffa-4e46-b2d4-f3eb5b43cefa: !Template
+    answer_choices: null
+    id: 44df6bac-bffa-4e46-b2d4-f3eb5b43cefa
+    jinja: 'Generate a title for the following short passage:
+
+
+      {{context}} |||
+
+      {{title|replace("_"," ")}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title
+    reference: ''
+  60ae905d-d5fa-4f60-bbcb-acb8d0ec2cf1: !Template
+    answer_choices: null
+    id: 60ae905d-d5fa-4f60-bbcb-acb8d0ec2cf1
+    jinja: "Q: {{question}}\n\nA: \n|||\n{{answers['text'] | most_frequent | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa qa
+    reference: ''
+  6118ec43-d051-4599-b24f-8779f66b9ad6: !Template
+    answer_choices: null
+    id: 6118ec43-d051-4599-b24f-8779f66b9ad6
+    jinja: '{{question}}
+
+
+      |||
+
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa
+    reference: ''
+  754e8bad-454f-4ae3-9747-299506955569: !Template
+    answer_choices: null
+    id: 754e8bad-454f-4ae3-9747-299506955569
+    jinja: 'Please come up with a good question to test reading comprehension about
+      the following paragraph:
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate question
+    reference: ''
+  7ff4bc14-08d4-47c1-9cd3-b7473d6505e7: !Template
+    answer_choices: null
+    id: 7ff4bc14-08d4-47c1-9cd3-b7473d6505e7
+    jinja: 'For the following passage-question pair, list all possible wrong answers
+      (pitfalls) test-takers may choose:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join(", ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: possible_pitfalls
+    reference: ''
+  88b952a3-3784-43bb-a463-4a34478785d5: !Template
+    answer_choices: null
+    id: 88b952a3-3784-43bb-a463-4a34478785d5
+    jinja: '{{["Question", "Problem"]  | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+      Hint: {{context}}
+
+
+      |||
+
+      {{answers["text"] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question/hint
+    reference: ''
+  8bcc0d77-6925-4fa1-b8cc-e6da3b272197: !Template
+    answer_choices: null
+    id: 8bcc0d77-6925-4fa1-b8cc-e6da3b272197
+    jinja: "Question: {{question}}\n\nAnswer: \n|||\n{{answers['text'] | most_frequent\
+      \ | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa question answer
+    reference: ''
+  a99d7cf5-d723-4c7a-b843-e2b8a476754d: !Template
+    answer_choices: null
+    id: a99d7cf5-d723-4c7a-b843-e2b8a476754d
+    jinja: 'I''ve always wondered: {{question}}
+
+
+      I searched Wikipedia and this is what I found. What''s the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: wondered
+    reference: ''
+  a9d70ff7-8080-4eaa-9be2-1b67fe9b44f4: !Template
+    answer_choices: null
+    id: a9d70ff7-8080-4eaa-9be2-1b67fe9b44f4
+    jinja: 'I''m working on the final exam for my class and am trying to figure out
+      the answer to the question "{{question}}" I found the following info on Wikipedia
+      and I think it has the answer. Can you tell me the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: exam
+    reference: ''
+  f086fa63-6ca2-48d2-857d-179ab88fce48: !Template
+    answer_choices: null
+    id: f086fa63-6ca2-48d2-857d-179ab88fce48
+    jinja: 'I''m creating a final exam for my reading class. Can you please come up
+      with a good question to quiz how well students have read the following text
+      snippet?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: exam creation help
+    reference: ''
+  f9b51e3b-a41a-47a5-b929-76a1e0efd430: !Template
+    answer_choices: null
+    id: f9b51e3b-a41a-47a5-b929-76a1e0efd430
+    jinja: 'Count the characters up until "{{answers["text"][0]}}" appears in the
+      following chunk of text.
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: count letters
+    reference: ''
+  fb81ba4d-341a-43f0-a94f-fa7e350d10c0: !Template
+    answer_choices: null
+    id: fb81ba4d-341a-43f0-a94f-fa7e350d10c0
+    jinja: 'List all possible non-answers that have a lot of words in common with
+      the following context-question pair:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join('', '')}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: incorrect_answers
+    reference: ''
diff --git a/promptsource/templates/squad_v2/templates.yaml b/promptsource/templates/squad_v2/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dbc1d8b8c1486270f15ea764165c1d1b5ad2f71d
--- /dev/null
+++ b/promptsource/templates/squad_v2/templates.yaml
@@ -0,0 +1,367 @@
+dataset: squad_v2
+templates:
+  17b83a3f-f748-42e6-9cdf-b2951dd8299d: !Template
+    answer_choices: null
+    id: 17b83a3f-f748-42e6-9cdf-b2951dd8299d
+    jinja: '{% set seq = [
+
+      ''Answer the question depending on the context.'',
+
+      ''What is the answer?'',
+
+      ] %}
+
+
+      {{ seq | choice }}
+
+      Context: {{context}};
+
+      Question: {{question}};
+
+      Answer: |||
+
+      {% if answers.text == [] %}
+
+      Answer not in context
+
+      {% else %}
+
+      {{answers.text[0]}}
+
+      {% endif %}'
+    metadata: &id001 !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Questions with Context
+    reference: Given context and question, give answer
+  189dcc58-fd13-4771-ad03-7879a61c7ab7: !Template
+    answer_choices: null
+    id: 189dcc58-fd13-4771-ad03-7879a61c7ab7
+    jinja: '{% if answers.text != [] %}
+
+      Determine the question that you might have asked to get back the following answer
+      for the given context
+
+      Context: {{context}};
+
+      Answer: {{answers.text[0]}};
+
+      Question: |||
+
+      {{question}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Jeopardy with Context
+    reference: Given context and an answer, guess the question.
+  20064b80-e4d0-41b7-9135-92c0077d5044: !Template
+    answer_choices: null
+    id: 20064b80-e4d0-41b7-9135-92c0077d5044
+    jinja: '{% set seq = [
+
+      ''What is this about? '',
+
+      ''What is the paragraph about? '',
+
+      ''Get the topic from: '',
+
+      ''From the passage,  get the topic'',
+
+      ''I want to know the topic. '',
+
+      ''Topic from the passage: '',
+
+      ''Topic from the paragraph: '',
+
+      ] %}
+
+      {{ seq | choice }}
+
+      {{context}} |||
+
+      {{title | replace("_", " ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      - Other
+      original_task: false
+    name: Topic Prediction - Context with randomized prompt options
+    reference: Asks to predict the topic given the context, phrased as if one
+      person is asking another.
+  338cc143-361e-4796-b035-31fb2201c49f: !Template
+    answer_choices: null
+    id: 338cc143-361e-4796-b035-31fb2201c49f
+    jinja: '{% set seq = [
+
+      ''This is about '',
+
+      ''What is this about? '',
+
+      ''The paragraph is about '',
+
+      ''What is the paragraph about? '',
+
+      ''Get the topic: '',
+
+      ''From the passage, the topic is'',
+
+      ''I want to know the topic. '',
+
+      ''Topic from the passage: '',
+
+      ''Topic from the paragraph: '',
+
+      ] %}
+
+      {{context}}
+
+      {{ seq | choice }}|||
+
+      {{title | replace("_", " ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      - Other
+      original_task: false
+    name: Topic Prediction - Context with randomized prompt options placed in the
+      end
+    reference: The prompt is placed at the end of Context
+  7a44cd99-7420-4456-aaaa-34e2c81d1679: !Template
+    answer_choices: null
+    id: 7a44cd99-7420-4456-aaaa-34e2c81d1679
+    jinja: '{% if answers.text != [] %}
+
+      What is a question that would give the following answer?
+
+      Answer: {{answers.text[0]}};
+
+      Question: |||
+
+      {{question}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Jeopardy without Context
+    reference: Given an answer, output a viable question. Context is omitted.
+  8bea1123-fd8d-4bac-96bf-b8a289ee74b3: !Template
+    answer_choices: null
+    id: 8bea1123-fd8d-4bac-96bf-b8a289ee74b3
+    jinja: '{% set seq = [
+
+      ''Can you tell me '',
+
+      ''Please tell me '',
+
+      ''Tell me '',
+
+      ''From the passage, '',
+
+      ''I want to know '',
+
+      ''I want to ask '',
+
+      ''What is the answer to: '',
+
+      ''Find the answer to: '',
+
+      ''Answer: '',
+
+      '''',
+
+      ] %}
+
+      {{context}} {{ seq | choice }}{{question}}|||
+
+      {% if answers.text == [] %}
+
+      Answer not in context
+
+      {% else %}
+
+      {{answers.text[0]}}
+
+      {% endif %}'
+    metadata: &id002 !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: Questions with Context - Without Prompt Keywords
+    reference: Given context and question, give answer. No keywords to delineate context
+      and question are given.
+  b14c9843-fd56-42ff-817d-39e41963c847: !Template
+    answer_choices: null
+    id: b14c9843-fd56-42ff-817d-39e41963c847
+    jinja: '{% set seq = [
+
+      ''Answer the question depending on the context.'',
+
+      ''What is the answer?'',
+
+      ] %}
+
+
+      {{ seq | choice }}
+
+      Context: {{context}};
+
+      Question: {{question}};
+
+      If you can''t find the answer, please respond "unanswerable".
+
+      Answer: |||
+
+      {% if answers.text == [] %}
+
+      unanswerable
+
+      {% else %}
+
+      {{answers.text[0]}}
+
+      {% endif %}'
+    metadata: *id001
+    name: Questions with Context +unanswerable
+    reference: Given context and question, give answer
+  d768c181-1c9b-40c3-aa01-fc78c3b29875: !Template
+    answer_choices: null
+    id: d768c181-1c9b-40c3-aa01-fc78c3b29875
+    jinja: '{% if answers.text != [] %}
+
+      {{question}}|||
+
+      {{answers.text[0]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Trivia
+    reference: Given the input, directly outputs the answer.
+  e1630107-8f5d-44ce-8ccd-6fa80da80328: !Template
+    answer_choices: null
+    id: e1630107-8f5d-44ce-8ccd-6fa80da80328
+    jinja: '{% set seq = [
+
+      ''Can you tell me '',
+
+      ''Please tell me '',
+
+      ''Tell me '',
+
+      ''From the passage, '',
+
+      ''I want to know '',
+
+      ''I want to ask '',
+
+      ''What is the answer to: '',
+
+      ''Find the answer to: '',
+
+      ''Answer: '',
+
+      '''',
+
+      ] %}
+
+      {{context}} {{ seq | choice }}{{question}} If you can''t find the answer, please
+      respond "unanswerable". |||
+
+      {% if answers.text == [] %}
+
+      unanswerable
+
+      {% else %}
+
+      {{answers.text[0]}}
+
+      {% endif %}'
+    metadata: *id002
+    name: Questions with Context - Without Prompt Keywords +unanswerable
+    reference: Given context and question, give answer. No keywords to delineate context
+      and question are given.
+  e2e41877-4794-4ff9-9f92-a2a85105e2a7: !Template
+    answer_choices: yes ||| no
+    id: e2e41877-4794-4ff9-9f92-a2a85105e2a7
+    jinja: "Context: {{context}}; \n\nQuestion: {{question}} \n\nIs this question\
+      \ answerable? ||| \n{% if answers.text != [] %}\n{{answer_choices[0]}}\n{% else\
+      \ %}\n{{answer_choices[1]}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: Unanswerable question
+    reference: The template checks if the question is answerable or not
+  e51c23b9-5b10-4db3-a0d1-ba546830173d: !Template
+    answer_choices: null
+    id: e51c23b9-5b10-4db3-a0d1-ba546830173d
+    jinja: '{% set seq = [
+
+      ''Determine the topic of the question-answer pair. '',
+
+      ''Find the topic. '',
+
+      ''What is the topic from this? '',
+
+      ] %}
+
+      {% if answers.text != [] %}
+
+      {{ seq | choice }}
+
+      Question: {{question}};  Answer: {{answers.text[0]}}; Topic: |||
+
+      {{title}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      - Other
+      original_task: false
+    name: Topic Prediction - Question and Answer Pair
+    reference: Given a Question-Answer pair, generate the topic.
+  fdcf132e-6c70-4188-999e-93601ee8e089: !Template
+    answer_choices: null
+    id: fdcf132e-6c70-4188-999e-93601ee8e089
+    jinja: 'What is the following passage about?
+
+      {{context}} |||
+
+      {{title | replace("_", " ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      - Other
+      original_task: false
+    name: Topic Prediction - Context
+    reference: Predict the topic from the passage
diff --git a/promptsource/templates/squadshifts/amazon/templates.yaml b/promptsource/templates/squadshifts/amazon/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1191e891591b648fa6c438af441be2841ada53cf
--- /dev/null
+++ b/promptsource/templates/squadshifts/amazon/templates.yaml
@@ -0,0 +1,286 @@
+dataset: squadshifts
+subset: amazon
+templates:
+  0b69eb58-0ad7-45b4-baf2-f3abac775fe2: !Template
+    answer_choices: null
+    id: 0b69eb58-0ad7-45b4-baf2-f3abac775fe2
+    jinja: 'Generate a title for the following short passage:
+
+
+      {{context}} |||
+
+      {{title|replace("_"," ")}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title
+    reference: ''
+  129b0f6f-3c65-4e7c-97c2-ef30cf92f417: !Template
+    answer_choices: null
+    id: 129b0f6f-3c65-4e7c-97c2-ef30cf92f417
+    jinja: "Question: {{question}}\n\nAnswer: \n|||\n{{answers['text'] | most_frequent\
+      \ | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa question answer
+    reference: ''
+  258731f1-2101-4178-b30f-571336fbef78: !Template
+    answer_choices: null
+    id: 258731f1-2101-4178-b30f-571336fbef78
+    jinja: 'For the following passage-question pair, list all possible wrong answers
+      (pitfalls) test-takers may choose:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join(", ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: possible_pitfalls
+    reference: ''
+  3c6bb901-c615-478f-904b-29122208e8bf: !Template
+    answer_choices: null
+    id: 3c6bb901-c615-478f-904b-29122208e8bf
+    jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: after
+    reference: ''
+  4c9c3a3c-da8c-42aa-a605-3063122c32eb: !Template
+    answer_choices: null
+    id: 4c9c3a3c-da8c-42aa-a605-3063122c32eb
+    jinja: '{{question}}
+
+
+      |||
+
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa
+    reference: ''
+  5cce68fe-de8c-40ed-b1f5-d70ce78859df: !Template
+    answer_choices: null
+    id: 5cce68fe-de8c-40ed-b1f5-d70ce78859df
+    jinja: 'Generate a possible question for the following short passage:
+
+
+      {{context}} |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: possible_qn
+    reference: ''
+  7ff63436-6cff-4963-b208-2002151daad7: !Template
+    answer_choices: null
+    id: 7ff63436-6cff-4963-b208-2002151daad7
+    jinja: 'List all possible non-answers that have a lot of words in common with
+      the following context-question pair:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join('', '')}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: incorrect_answers
+    reference: ''
+  952fccbc-c318-455b-97a5-e72070d7fbe3: !Template
+    answer_choices: null
+    id: 952fccbc-c318-455b-97a5-e72070d7fbe3
+    jinja: 'I''m creating a final exam for my reading class. Can you please come up
+      with a good question to quiz how well students have read the following text
+      snippet?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: exam creation help
+    reference: ''
+  969cb982-3751-451f-a211-79c754a8d638: !Template
+    answer_choices: null
+    id: 969cb982-3751-451f-a211-79c754a8d638
+    jinja: 'Please come up with a good question to test reading comprehension about
+      the following paragraph:
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate question
+    reference: ''
+  b9296bf2-fb8c-4918-b118-d987aa92db3c: !Template
+    answer_choices: null
+    id: b9296bf2-fb8c-4918-b118-d987aa92db3c
+    jinja: 'Count the characters up until "{{answers["text"][0]}}" appears in the
+      following chunk of text.
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: count letters
+    reference: ''
+  bc766330-9e98-4d10-be7d-bca0393308cb: !Template
+    answer_choices: null
+    id: bc766330-9e98-4d10-be7d-bca0393308cb
+    jinja: 'I''m working on the final exam for my class and am trying to figure out
+      the answer to the question "{{question}}". I found the following info on Wikipedia
+      and I think it has the answer. Can you tell me the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: exam
+    reference: ''
+  dff1c9a3-0ef5-4f5e-8630-8f41e297f4c7: !Template
+    answer_choices: null
+    id: dff1c9a3-0ef5-4f5e-8630-8f41e297f4c7
+    jinja: 'At what character does the text "{{answers["text"][0]}}" start in the
+      following paragraph?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find text
+    reference: ''
+  e68c1bd0-1bcf-4ee1-89f1-01f2fb76ddfc: !Template
+    answer_choices: null
+    id: e68c1bd0-1bcf-4ee1-89f1-01f2fb76ddfc
+    jinja: '{{["Question", "Problem"]  | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+      Hint: {{context}}
+
+
+      |||
+
+      {{answers["text"] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question/hint
+    reference: ''
+  e77c3f2d-a53b-4086-b236-14c63fc327e9: !Template
+    answer_choices: null
+    id: e77c3f2d-a53b-4086-b236-14c63fc327e9
+    jinja: "Q: {{question}}\n\nA: \n|||\n{{answers['text'] | most_frequent | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa qa
+    reference: ''
+  f3187941-53ef-4285-b5bd-deaf1ef81001: !Template
+    answer_choices: null
+    id: f3187941-53ef-4285-b5bd-deaf1ef81001
+    jinja: 'Use the following non-answers to generate a possible short passage-question
+      pair:
+
+      {{answers["text"]|join('', '')}} |||
+
+      {{context}}
+
+      {{question}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: answers_question
+    reference: ''
+  f8e5918c-d9e2-40cc-93f3-65c7ed11bf09: !Template
+    answer_choices: null
+    id: f8e5918c-d9e2-40cc-93f3-65c7ed11bf09
+    jinja: 'I''ve always wondered: {{question}}
+
+
+      I searched Wikipedia and this is what I found. What''s the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: wondered
+    reference: ''
diff --git a/promptsource/templates/squadshifts/new_wiki/templates.yaml b/promptsource/templates/squadshifts/new_wiki/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..32d932122b78f157ce2d4ea6aedd8739e9874add
--- /dev/null
+++ b/promptsource/templates/squadshifts/new_wiki/templates.yaml
@@ -0,0 +1,286 @@
+dataset: squadshifts
+subset: new_wiki
+templates:
+  1c38fc00-2a70-482b-b491-a0603e6db137: !Template
+    answer_choices: null
+    id: 1c38fc00-2a70-482b-b491-a0603e6db137
+    jinja: 'For the following passage-question pair, list all possible wrong answers
+      (pitfalls) test-takers may choose:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join(", ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: possible_pitfalls
+    reference: ''
+  1f282852-c08f-46f2-a702-c1b94332c85d: !Template
+    answer_choices: null
+    id: 1f282852-c08f-46f2-a702-c1b94332c85d
+    jinja: 'Generate a possible question for the following short passage:
+
+
+      {{context}} |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: possible_qn
+    reference: ''
+  4ba25909-7d60-4374-a030-968189b4e444: !Template
+    answer_choices: null
+    id: 4ba25909-7d60-4374-a030-968189b4e444
+    jinja: "Q: {{question}}\n\nA: \n|||\n{{answers['text'] | most_frequent | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa qa
+    reference: ''
+  4fbdd9ac-e770-4383-a5b4-0154e6d0cf99: !Template
+    answer_choices: null
+    id: 4fbdd9ac-e770-4383-a5b4-0154e6d0cf99
+    jinja: 'At what character does the text "{{answers["text"][0]}}" start in the
+      following paragraph?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find text
+    reference: ''
+  5b1e053d-8ebe-43d5-85b5-c61b8ce444be: !Template
+    answer_choices: null
+    id: 5b1e053d-8ebe-43d5-85b5-c61b8ce444be
+    jinja: 'Count the characters up until "{{answers["text"][0]}}" appears in the
+      following chunk of text.
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: count letters
+    reference: ''
+  60995116-53af-456f-ac20-858b83fa9ba6: !Template
+    answer_choices: null
+    id: 60995116-53af-456f-ac20-858b83fa9ba6
+    jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: after
+    reference: ''
+  612266f2-da8e-4a88-b8fc-69c7be342cfd: !Template
+    answer_choices: null
+    id: 612266f2-da8e-4a88-b8fc-69c7be342cfd
+    jinja: '{{question}}
+
+
+      |||
+
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa
+    reference: ''
+  694f0a21-4cc2-4485-88e6-e5b1ceb9a341: !Template
+    answer_choices: null
+    id: 694f0a21-4cc2-4485-88e6-e5b1ceb9a341
+    jinja: "Question: {{question}}\n\nAnswer: \n|||\n{{answers['text'] | most_frequent\
+      \ | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa question answer
+    reference: ''
+  a5691e58-f2cc-41eb-8308-c7046856f72f: !Template
+    answer_choices: null
+    id: a5691e58-f2cc-41eb-8308-c7046856f72f
+    jinja: 'Use the following non-answers to generate a possible short passage-question
+      pair:
+
+      {{answers["text"]|join('', '')}} |||
+
+      {{context}}
+
+      {{question}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: answers_question
+    reference: ''
+  bc874f68-ce23-43cd-9683-5574c9ef01cb: !Template
+    answer_choices: null
+    id: bc874f68-ce23-43cd-9683-5574c9ef01cb
+    jinja: 'I''ve always wondered: {{question}}
+
+
+      I searched Wikipedia and this is what I found. What''s the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: wondered
+    reference: ''
+  bfa571de-6076-44c2-b23b-8b2a404b180d: !Template
+    answer_choices: null
+    id: bfa571de-6076-44c2-b23b-8b2a404b180d
+    jinja: '{{["Question", "Problem"]  | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+      Hint: {{context}}
+
+
+      |||
+
+      {{answers["text"] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question/hint
+    reference: ''
+  caec82a4-f845-4e10-aad4-19111c9884c1: !Template
+    answer_choices: null
+    id: caec82a4-f845-4e10-aad4-19111c9884c1
+    jinja: 'I''m creating a final exam for my reading class. Can you please come up
+      with a good question to quiz how well students have read the following text
+      snippet?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: exam creation help
+    reference: ''
+  d760501f-7726-48ec-9d86-46c10ce408d3: !Template
+    answer_choices: null
+    id: d760501f-7726-48ec-9d86-46c10ce408d3
+    jinja: 'Generate a title for the following short passage:
+
+
+      {{context}} |||
+
+      {{title|replace("_"," ")}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title
+    reference: ''
+  e5b77630-c87c-47b5-9544-5a68cd6b5a93: !Template
+    answer_choices: null
+    id: e5b77630-c87c-47b5-9544-5a68cd6b5a93
+    jinja: 'List all possible non-answers that have a lot of words in common with
+      the following context-question pair:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join('', '')}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: incorrect_answers
+    reference: ''
+  f372bda7-8ac7-4b7f-a777-37c6cdc18f34: !Template
+    answer_choices: null
+    id: f372bda7-8ac7-4b7f-a777-37c6cdc18f34
+    jinja: 'Please come up with a good question to test reading comprehension about
+      the following paragraph:
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate question
+    reference: ''
+  f602988d-c3ea-4894-9fb6-7fadbb9d87c8: !Template
+    answer_choices: null
+    id: f602988d-c3ea-4894-9fb6-7fadbb9d87c8
+    jinja: 'I''m working on the final exam for my class and am trying to figure out
+      the answer to the question "{{question}}". I found the following info on Wikipedia
+      and I think it has the answer. Can you tell me the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: exam
+    reference: ''
diff --git a/promptsource/templates/squadshifts/nyt/templates.yaml b/promptsource/templates/squadshifts/nyt/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2ccf3d719e95e34bd0873e9d6a19a15338b1ae43
--- /dev/null
+++ b/promptsource/templates/squadshifts/nyt/templates.yaml
@@ -0,0 +1,286 @@
+dataset: squadshifts
+subset: nyt
+templates:
+  07b55280-3b59-43a2-acb7-84cff5938a94: !Template
+    answer_choices: null
+    id: 07b55280-3b59-43a2-acb7-84cff5938a94
+    jinja: "Question: {{question}}\n\nAnswer: \n|||\n{{answers['text'] | most_frequent\
+      \ | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa question answer
+    reference: ''
+  09548d27-c97f-48d4-b9e4-ff591e4caeee: !Template
+    answer_choices: null
+    id: 09548d27-c97f-48d4-b9e4-ff591e4caeee
+    jinja: '{{question}}
+
+
+      |||
+
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa
+    reference: ''
+  0cc0f615-9d2e-4d44-91a0-69042eb31a52: !Template
+    answer_choices: null
+    id: 0cc0f615-9d2e-4d44-91a0-69042eb31a52
+    jinja: "Q: {{question}}\n\nA: \n|||\n{{answers['text'] | most_frequent | choice}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: cbqa qa
+    reference: ''
+  45f2143e-52cb-4498-abf7-54cee1eec9db: !Template
+    answer_choices: null
+    id: 45f2143e-52cb-4498-abf7-54cee1eec9db
+    jinja: 'After reading the following paragraph, please answer this question: {{question}}
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: after
+    reference: ''
+  4d61ade3-199c-443f-9bf6-f49ce6bcf85f: !Template
+    answer_choices: null
+    id: 4d61ade3-199c-443f-9bf6-f49ce6bcf85f
+    jinja: 'At what character does the text "{{answers["text"][0]}}" start in the
+      following paragraph?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: find text
+    reference: ''
+  8557989a-e874-4c10-b63b-63687105c8a1: !Template
+    answer_choices: null
+    id: 8557989a-e874-4c10-b63b-63687105c8a1
+    jinja: 'Please come up with a good question to test reading comprehension about
+      the following paragraph:
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate question
+    reference: ''
+  857d47f4-aa11-4772-b0eb-828872a13c0a: !Template
+    answer_choices: null
+    id: 857d47f4-aa11-4772-b0eb-828872a13c0a
+    jinja: 'Use the following non-answers to generate a possible short passage-question
+      pair:
+
+      {{answers["text"]|join('', '')}} |||
+
+      {{context}}
+
+      {{question}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: answers_question
+    reference: ''
+  a69a1548-2bef-447c-bc7b-a6de28e0ed43: !Template
+    answer_choices: null
+    id: a69a1548-2bef-447c-bc7b-a6de28e0ed43
+    jinja: 'Count the characters up until "{{answers["text"][0]}}" appears in the
+      following chunk of text.
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{answers["answer_start"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: count letters
+    reference: ''
+  a9017ef5-345f-4815-8c8a-eb16c6e40305: !Template
+    answer_choices: null
+    id: a9017ef5-345f-4815-8c8a-eb16c6e40305
+    jinja: 'List all possible non-answers that have a lot of words in common with
+      the following context-question pair:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join('', '')}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: incorrect_answers
+    reference: ''
+  a9733fa0-36bb-46a7-b895-dd347ab6c30d: !Template
+    answer_choices: null
+    id: a9733fa0-36bb-46a7-b895-dd347ab6c30d
+    jinja: 'Generate a possible question for the following short passage:
+
+
+      {{context}} |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: possible_qn
+    reference: ''
+  b9a6bc1e-2438-44f2-af80-388d9c9e40a1: !Template
+    answer_choices: null
+    id: b9a6bc1e-2438-44f2-af80-388d9c9e40a1
+    jinja: '{{["Question", "Problem"]  | choice}} {{range(1, 12) | choice}}: {{question}}
+
+
+      Hint: {{context}}
+
+
+      |||
+
+      {{answers["text"] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: question/hint
+    reference: ''
+  d57bce72-7ff8-4c20-b3fe-193461f68944: !Template
+    answer_choices: null
+    id: d57bce72-7ff8-4c20-b3fe-193461f68944
+    jinja: 'I''m creating a final exam for my reading class. Can you please come up
+      with a good question to quiz how well students have read the following text
+      snippet?
+
+
+      {{context}}
+
+
+      |||
+
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: exam creation help
+    reference: ''
+  d9ff0860-0ead-4529-b68d-ff0f05c5d1bd: !Template
+    answer_choices: null
+    id: d9ff0860-0ead-4529-b68d-ff0f05c5d1bd
+    jinja: 'Generate a title for the following short passage:
+
+
+      {{context}} |||
+
+      {{title|replace("_"," ")}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title
+    reference: ''
+  e878e5d6-f71f-4c2e-80a3-1fa416e9531e: !Template
+    answer_choices: null
+    id: e878e5d6-f71f-4c2e-80a3-1fa416e9531e
+    jinja: 'I''ve always wondered: {{question}}
+
+
+      I searched Wikipedia and this is what I found. What''s the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: wondered
+    reference: ''
+  ea4d6a09-d413-4ca4-9b6b-43de7aacc831: !Template
+    answer_choices: null
+    id: ea4d6a09-d413-4ca4-9b6b-43de7aacc831
+    jinja: 'I''m working on the final exam for my class and am trying to figure out
+      the answer to the question "{{question}}". I found the following info on Wikipedia
+      and I think it has the answer. Can you tell me the answer?
+
+
+      {{context}}
+
+
+      |||
+
+      {{answers[''text''] | most_frequent | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: exam
+    reference: ''
+  f9d5a9a8-53c4-4443-88a5-5a613d89744a: !Template
+    answer_choices: null
+    id: f9d5a9a8-53c4-4443-88a5-5a613d89744a
+    jinja: 'For the following passage-question pair, list all possible wrong answers
+      (pitfalls) test-takers may choose:
+
+
+      {{context}}
+
+      {{question}} |||
+
+      {{answers["text"]|join(", ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: possible_pitfalls
+    reference: ''
diff --git a/promptsource/templates/sst/default/templates.yaml b/promptsource/templates/sst/default/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b8941f1af1c573a83037b6b2ec16dad92e435770
--- /dev/null
+++ b/promptsource/templates/sst/default/templates.yaml
@@ -0,0 +1,50 @@
+dataset: sst
+subset: default
+templates:
+  5119a0b5-5d82-4401-900a-7fafc1d48ff6: !Template
+    answer_choices: null
+    id: 5119a0b5-5d82-4401-900a-7fafc1d48ff6
+    jinja: 'How positive is the movie review below?
+
+      Give a score on a scale from 0 to 1.
+
+
+      {{sentence}} |||
+
+      {{''%0.1f''| format(label|float)}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentiment_scoring
+    reference: ''
+  b15994be-ca57-4924-9af7-fbaa6ee0124b: !Template
+    answer_choices: no ||| yes
+    id: b15994be-ca57-4924-9af7-fbaa6ee0124b
+    jinja: 'Is the movie review below positive?
+
+
+      {{sentence}} |||
+
+      {{answer_choices
+
+      [0 if label < 0.5 else 1]
+
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentiment_classification
+    reference: ''
+  c5dc88f9-204f-43be-b09f-f6334215e271: !Template
+    answer_choices: null
+    id: c5dc88f9-204f-43be-b09f-f6334215e271
+    jinja: "Form a sentence parse tree (formatted as a parent pointer tree) using\
+      \ the tokens below:\n\n{{sentence}} |||\n {{tokens.replace('|',' ')}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: tree_from_tokens
+    reference: ''
diff --git a/promptsource/templates/story_cloze/2016/templates.yaml b/promptsource/templates/story_cloze/2016/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b01484750d1156c2384d44464a80ed068c453aeb
--- /dev/null
+++ b/promptsource/templates/story_cloze/2016/templates.yaml
@@ -0,0 +1,87 @@
+dataset: story_cloze
+subset: '2016'
+templates:
+  1a4946f9-a0e2-4fbb-aee8-b26ead2cf6b8: !Template
+    answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+    id: 1a4946f9-a0e2-4fbb-aee8-b26ead2cf6b8
+    jinja: '{{input_sentence_1}} {{input_sentence_2}} {{input_sentence_3}} {{input_sentence_4}}
+      What is a possible continuation for the story given the following options?
+      - {{answer_choices | join("\n- ")}} ||| {{answer_choices[answer_right_ending
+      -1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Answer Given options
+    reference: ''
+  1a9d53bc-eb77-4e7c-af6e-3d15b79d6cf1: !Template
+    answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+    id: 1a9d53bc-eb77-4e7c-af6e-3d15b79d6cf1
+    jinja: "Read the following story :\n\n{{input_sentence_1}}\n{{input_sentence_2}}\n\
+      {{input_sentence_3}}\n{{input_sentence_4}}\n\nChoose a possible ending for the\
+      \ previous story from the following options: \n- {{answer_choices | join(\"\\\
+      n- \")}}\n|||\n\n{{answer_choices[answer_right_ending -1]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Choose Story Ending
+    reference: ''
+  9dab69d1-cad0-4d2f-a7cc-120df233571c: !Template
+    answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+    id: 9dab69d1-cad0-4d2f-a7cc-120df233571c
+    jinja: 'Yesterday, I watched a movie. Here''s what happened: {{input_sentence_1}}
+      {{input_sentence_2}} {{input_sentence_3}} {{input_sentence_4}} What happens
+      next? - {{answer_choices | join("\n- ")}} ||| {{answer_choices[answer_right_ending
+      -1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Movie What Happens Next
+    reference: ''
+  b5c8445f-2d3a-4691-bdd5-58956816702f: !Template
+    answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+    id: b5c8445f-2d3a-4691-bdd5-58956816702f
+    jinja: "What is a possible continuation for the following story ? \n\n{{input_sentence_1}}\n\
+      {{input_sentence_2}}\n{{input_sentence_3}}\n{{input_sentence_4}}\n\nChoose from\
+      \ the following options:\n- {{answer_choices | join(\"\\n- \")}}\n|||\n\n{{answer_choices[answer_right_ending\
+      \ -1]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Story Continuation and Options
+    reference: ''
+  baffa716-43cf-4954-a35c-655d775321e6: !Template
+    answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+    id: baffa716-43cf-4954-a35c-655d775321e6
+    jinja: 'Generate a possible ending for the following story: {{input_sentence_1}}
+      {{input_sentence_2}} {{input_sentence_3}} {{input_sentence_4}} ||| {{answer_choices[answer_right_ending
+      -1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Generate Ending
+    reference: ''
+  c6f3d802-4f97-449f-a911-03470d418f7d: !Template
+    answer_choices: '{{sentence_quiz1}} ||| {{sentence_quiz2}}'
+    id: c6f3d802-4f97-449f-a911-03470d418f7d
+    jinja: 'I read the following novel: {{input_sentence_1}} {{input_sentence_2}}
+      {{input_sentence_3}} {{input_sentence_4}} What do you think is the most probable
+      ending? You can choose from the following options: - {{answer_choices | join("\n-
+      ")}} ||| {{answer_choices[answer_right_ending -1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Novel Correct Ending
+    reference: ''
diff --git a/promptsource/templates/stsb_multi_mt/en/templates.yaml b/promptsource/templates/stsb_multi_mt/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..26cebfad6dbf022ed318e54ed24962453c5f4fc9
--- /dev/null
+++ b/promptsource/templates/stsb_multi_mt/en/templates.yaml
@@ -0,0 +1,59 @@
+dataset: stsb_multi_mt
+subset: en
+templates:
+  6c0bdf61-9baa-415a-bf03-fdb8789d1740: !Template
+    answer_choices: null
+    id: 6c0bdf61-9baa-415a-bf03-fdb8789d1740
+    jinja: How similar are "{{sentence1}}" and "{{sentence2}}"? Give a score between
+      {{"0.0"}} and {{"5.0"}}. ||| {{(((5*similarity_score)|round)/5)}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Similarity_scoring_3
+    reference: ''
+  6df357b5-f8ea-49d2-b304-3541acb5271a: !Template
+    answer_choices: no ||| yes
+    id: 6df357b5-f8ea-49d2-b304-3541acb5271a
+    jinja: Do you think "{{sentence1}}" and "{{sentence2}}" express the same thing?
+      ||| {{answer_choices[0 if similarity_score < 2.5 else 1]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Similarity_scoring_binary_1
+    reference: sst2
+  775af665-d8a5-46b2-bfcf-2a21abc7e99c: !Template
+    answer_choices: no ||| yes
+    id: 775af665-d8a5-46b2-bfcf-2a21abc7e99c
+    jinja: Do "{{sentence1}}" and "{{sentence2}}" seem similar to you ? ||| {{answer_choices[0
+      if similarity_score < 2.5 else 1]}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Similarity_scoring_binary_2
+    reference: ''
+  9cab340c-32ce-465d-be89-049e4a63af11: !Template
+    answer_choices: null
+    id: 9cab340c-32ce-465d-be89-049e4a63af11
+    jinja: On a scale from {{"1.0"}} to {{"5.0"}}, how similar are "{{sentence1}}"
+      and "{{sentence2}}"? ||| {{(((5*similarity_score)|round)/5)}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Similarity_scoring_1
+    reference: ''
+  e0551bee-61f0-4c1e-9c3f-18c8b54439f8: !Template
+    answer_choices: null
+    id: e0551bee-61f0-4c1e-9c3f-18c8b54439f8
+    jinja: "Rate the similarity of these two sentences: ({{\"1.0\"}} being the lowest\
+      \ and {{\"5.0\"}} the highest)\n\"{{sentence1}}\" and \"{{sentence2}}\" \n|||\n\
+      {{(((5*similarity_score)|round)/5)}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Similarity_scoring_2
+    reference: ''
diff --git a/promptsource/templates/subjqa/books/templates.yaml b/promptsource/templates/subjqa/books/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..db84e8070b26d66c642e26c8fe5f0444dcd3bb3e
--- /dev/null
+++ b/promptsource/templates/subjqa/books/templates.yaml
@@ -0,0 +1,160 @@
+dataset: subjqa
+subset: books
+templates:
+  071f2b19-7392-4258-8a60-5a96f3e44b0d: !Template
+    answer_choices: null
+    id: 071f2b19-7392-4258-8a60-5a96f3e44b0d
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Answer the following question with extracts from the context: {{question}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_first
+    reference: ''
+  a217525b-caf2-4ae3-8a6e-06bd48bf4728: !Template
+    answer_choices: null
+    id: a217525b-caf2-4ae3-8a6e-06bd48bf4728
+    jinja: '{{question}}
+
+
+      Answer using extracts from the following context. If you can''t find an answer,
+      return {{"Unanswerable"}}
+
+
+      Context:
+
+      {{context}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_after
+    reference: ''
+  afe5086e-d9fe-4981-bcac-67d580950110: !Template
+    answer_choices: null
+    id: afe5086e-d9fe-4981-bcac-67d580950110
+    jinja: 'Question:
+
+      {{question}}
+
+
+      On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score
+    reference: ''
+  b4a015eb-9346-4739-9ebd-5f91d2f230be: !Template
+    answer_choices: null
+    id: b4a015eb-9346-4739-9ebd-5f91d2f230be
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Question:
+
+      {{question}}
+
+
+      How would you rate the subjectivity of the question (on a 1 to 5 scale with
+      1 being the most subjective)?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score_with_context
+    reference: ''
+  f074e3ce-966c-4d63-8d03-f0b6e5093b38: !Template
+    answer_choices: null
+    id: f074e3ce-966c-4d63-8d03-f0b6e5093b38
+    jinja: 'Possible categories:
+
+      - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]  |
+      join("\n- ") }}
+
+
+      Context:
+
+      {{context}}
+
+
+      Which of the categories corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q
+    reference: ''
+  f53cc9f9-1d34-47ff-b440-a6ad896bdc4a: !Template
+    answer_choices: null
+    id: f53cc9f9-1d34-47ff-b440-a6ad896bdc4a
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+      corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q_v2
+    reference: ''
diff --git a/promptsource/templates/subjqa/electronics/templates.yaml b/promptsource/templates/subjqa/electronics/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..48a904712cee86efaec8398eb169f6505b6aa7be
--- /dev/null
+++ b/promptsource/templates/subjqa/electronics/templates.yaml
@@ -0,0 +1,160 @@
+dataset: subjqa
+subset: electronics
+templates:
+  2077a669-1574-4117-84fe-e683bead8d46: !Template
+    answer_choices: null
+    id: 2077a669-1574-4117-84fe-e683bead8d46
+    jinja: 'Possible categories:
+
+      - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]  |
+      join("\n- ") }}
+
+
+      Context:
+
+      {{context}}
+
+
+      Which of the categories corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q
+    reference: ''
+  68baa479-1ad6-41f4-ad48-75d06683f1d2: !Template
+    answer_choices: null
+    id: 68baa479-1ad6-41f4-ad48-75d06683f1d2
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Answer the following question with extracts from the context: {{question}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_first
+    reference: ''
+  6caf413a-d696-4fdc-9f85-41aa0197f180: !Template
+    answer_choices: null
+    id: 6caf413a-d696-4fdc-9f85-41aa0197f180
+    jinja: '{{question}}
+
+
+      Answer using extracts from the following context. If you can''t find an answer,
+      return {{"Unanswerable"}}
+
+
+      Context:
+
+      {{context}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_after
+    reference: ''
+  6d03b0a7-0b1f-4165-af90-071baab36e49: !Template
+    answer_choices: null
+    id: 6d03b0a7-0b1f-4165-af90-071baab36e49
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Question:
+
+      {{question}}
+
+
+      How would you rate the subjectivity of the question (on a 1 to 5 scale with
+      1 being the most subjective)?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score_with_context
+    reference: ''
+  772db86f-9435-4ac1-bc9a-8bef389e5a80: !Template
+    answer_choices: null
+    id: 772db86f-9435-4ac1-bc9a-8bef389e5a80
+    jinja: 'Question:
+
+      {{question}}
+
+
+      On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score
+    reference: ''
+  e35fe9f1-84db-4436-9264-e30f090052f0: !Template
+    answer_choices: null
+    id: e35fe9f1-84db-4436-9264-e30f090052f0
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+      corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q_v2
+    reference: ''
diff --git a/promptsource/templates/subjqa/grocery/templates.yaml b/promptsource/templates/subjqa/grocery/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..17f6360450bd33059ba5a6a07eca77313a9a659c
--- /dev/null
+++ b/promptsource/templates/subjqa/grocery/templates.yaml
@@ -0,0 +1,160 @@
+dataset: subjqa
+subset: grocery
+templates:
+  255dd1c5-3129-4f69-ae4f-3f2b47be926d: !Template
+    answer_choices: null
+    id: 255dd1c5-3129-4f69-ae4f-3f2b47be926d
+    jinja: 'Possible categories:
+
+      - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]  |
+      join("\n- ") }}
+
+
+      Context:
+
+      {{context}}
+
+
+      Which of the categories corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q
+    reference: ''
+  4857a5ed-9df9-417b-ac6e-504604ab7e37: !Template
+    answer_choices: null
+    id: 4857a5ed-9df9-417b-ac6e-504604ab7e37
+    jinja: 'Question:
+
+      {{question}}
+
+
+      On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score
+    reference: ''
+  5173e70e-7396-4932-95b6-3b740058a6bc: !Template
+    answer_choices: null
+    id: 5173e70e-7396-4932-95b6-3b740058a6bc
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Question:
+
+      {{question}}
+
+
+      How would you rate the subjectivity of the question (on a 1 to 5 scale with
+      1 being the most subjective)?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score_with_context
+    reference: ''
+  90b561d0-307f-49aa-a642-bbbad543f498: !Template
+    answer_choices: null
+    id: 90b561d0-307f-49aa-a642-bbbad543f498
+    jinja: '{{question}}
+
+
+      Answer using extracts from the following context. If you can''t find an answer,
+      return {{"Unanswerable"}}
+
+
+      Context:
+
+      {{context}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_after
+    reference: ''
+  c6ef2acd-f32f-49f5-9803-5017412f739d: !Template
+    answer_choices: null
+    id: c6ef2acd-f32f-49f5-9803-5017412f739d
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Answer the following question with extracts from the context: {{question}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_first
+    reference: ''
+  f19ac17f-ed79-4f64-9f7b-511d9f4e4c6b: !Template
+    answer_choices: null
+    id: f19ac17f-ed79-4f64-9f7b-511d9f4e4c6b
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+      corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q_v2
+    reference: ''
diff --git a/promptsource/templates/subjqa/movies/templates.yaml b/promptsource/templates/subjqa/movies/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85e09cb6e1f559c62d6cacd4088c4a1673680f55
--- /dev/null
+++ b/promptsource/templates/subjqa/movies/templates.yaml
@@ -0,0 +1,160 @@
+dataset: subjqa
+subset: movies
+templates:
+  36c91233-23e5-4a3d-a5d9-b58a9a5db16b: !Template
+    answer_choices: null
+    id: 36c91233-23e5-4a3d-a5d9-b58a9a5db16b
+    jinja: 'Question:
+
+      {{question}}
+
+
+      On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score
+    reference: ''
+  4aab6eb5-12e3-433e-90f2-6fd42d608e54: !Template
+    answer_choices: null
+    id: 4aab6eb5-12e3-433e-90f2-6fd42d608e54
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Answer the following question with extracts from the context: {{question}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_first
+    reference: ''
+  a66b2864-1bb4-4b39-8387-6ec2dc6c533f: !Template
+    answer_choices: null
+    id: a66b2864-1bb4-4b39-8387-6ec2dc6c533f
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+      corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q_v2
+    reference: ''
+  b55d80d5-788d-406e-be37-e911a7aa7236: !Template
+    answer_choices: null
+    id: b55d80d5-788d-406e-be37-e911a7aa7236
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Question:
+
+      {{question}}
+
+
+      How would you rate the subjectivity of the question (on a 1 to 5 scale with
+      1 being the most subjective)?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score_with_context
+    reference: ''
+  cac989ae-ff00-4be6-b909-65cabdfb6017: !Template
+    answer_choices: null
+    id: cac989ae-ff00-4be6-b909-65cabdfb6017
+    jinja: 'Possible categories:
+
+      - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]  |
+      join("\n- ") }}
+
+
+      Context:
+
+      {{context}}
+
+
+      Which of the categories corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q
+    reference: ''
+  fedc0f61-4cac-4baa-9f3b-283ac21fe2a4: !Template
+    answer_choices: null
+    id: fedc0f61-4cac-4baa-9f3b-283ac21fe2a4
+    jinja: '{{question}}
+
+
+      Answer using extracts from the following context. If you can''t find an answer,
+      return {{"Unanswerable"}}
+
+
+      Context:
+
+      {{context}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_after
+    reference: ''
diff --git a/promptsource/templates/subjqa/restaurants/templates.yaml b/promptsource/templates/subjqa/restaurants/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab792cba6eade079aaf2399fe54af7e6747883c7
--- /dev/null
+++ b/promptsource/templates/subjqa/restaurants/templates.yaml
@@ -0,0 +1,160 @@
+dataset: subjqa
+subset: restaurants
+templates:
+  5177d00a-255d-4a80-bb77-2d94f40e276c: !Template
+    answer_choices: null
+    id: 5177d00a-255d-4a80-bb77-2d94f40e276c
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Question:
+
+      {{question}}
+
+
+      How would you rate the subjectivity of the question (on a 1 to 5 scale with
+      1 being the most subjective)?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score_with_context
+    reference: ''
+  7a2ecf8e-8646-42f8-a7b6-3422ceab6e85: !Template
+    answer_choices: null
+    id: 7a2ecf8e-8646-42f8-a7b6-3422ceab6e85
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+      corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q_v2
+    reference: ''
+  7d900ca3-d6d6-41a8-bd64-d3c1547004d0: !Template
+    answer_choices: null
+    id: 7d900ca3-d6d6-41a8-bd64-d3c1547004d0
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Answer the following question with extracts from the context: {{question}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_first
+    reference: ''
+  8984babd-1a5d-456e-b439-2736627f0883: !Template
+    answer_choices: null
+    id: 8984babd-1a5d-456e-b439-2736627f0883
+    jinja: '{{question}}
+
+
+      Answer using extracts from the following context. If you can''t find an answer,
+      return {{"Unanswerable"}}
+
+
+      Context:
+
+      {{context}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_after
+    reference: ''
+  8ed11c13-6160-4b19-b643-77fb6e4aff33: !Template
+    answer_choices: null
+    id: 8ed11c13-6160-4b19-b643-77fb6e4aff33
+    jinja: 'Possible categories:
+
+      - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]  |
+      join("\n- ") }}
+
+
+      Context:
+
+      {{context}}
+
+
+      Which of the categories corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q
+    reference: ''
+  afd9a593-21db-4bf8-842c-9259a7e73e99: !Template
+    answer_choices: null
+    id: afd9a593-21db-4bf8-842c-9259a7e73e99
+    jinja: 'Question:
+
+      {{question}}
+
+
+      On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score
+    reference: ''
diff --git a/promptsource/templates/subjqa/tripadvisor/templates.yaml b/promptsource/templates/subjqa/tripadvisor/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..92cc736c294709859abda2b00cfe97fa5608a13e
--- /dev/null
+++ b/promptsource/templates/subjqa/tripadvisor/templates.yaml
@@ -0,0 +1,160 @@
+dataset: subjqa
+subset: tripadvisor
+templates:
+  0cb4bf0f-6f89-4f17-bf81-9740fac3d374: !Template
+    answer_choices: null
+    id: 0cb4bf0f-6f89-4f17-bf81-9740fac3d374
+    jinja: 'Possible categories:
+
+      - {{ ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]  |
+      join("\n- ") }}
+
+
+      Context:
+
+      {{context}}
+
+
+      Which of the categories corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q
+    reference: ''
+  61d21137-d2b6-42a4-b682-50e92be1ec2f: !Template
+    answer_choices: null
+    id: 61d21137-d2b6-42a4-b682-50e92be1ec2f
+    jinja: 'Question:
+
+      {{question}}
+
+
+      On a scale of 1 to 5 (1 being the most subjective), how subjective is the question?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score
+    reference: ''
+  892f6eeb-170e-42b7-8291-8317fa937fe7: !Template
+    answer_choices: null
+    id: 892f6eeb-170e-42b7-8291-8317fa937fe7
+    jinja: '{{question}}
+
+
+      Answer using extracts from the following context. If you can''t find an answer,
+      return {{"Unanswerable"}}
+
+
+      Context:
+
+      {{context}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_after
+    reference: ''
+  8de6ddd1-17d9-4eac-bb91-78a2f0d57f92: !Template
+    answer_choices: null
+    id: 8de6ddd1-17d9-4eac-bb91-78a2f0d57f92
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Question:
+
+      {{question}}
+
+
+      How would you rate the subjectivity of the question (on a 1 to 5 scale with
+      1 being the most subjective)?
+
+
+      |||
+
+
+      {{question_subj_level}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: q_subj_score_with_context
+    reference: ''
+  c67ab028-02fe-4a15-86a8-6c04a8b315f1: !Template
+    answer_choices: null
+    id: c67ab028-02fe-4a15-86a8-6c04a8b315f1
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Which of {{"books, electronics, grocery, movies, restaurants or tripadvisor"}}
+      corresponds to the context?
+
+
+      |||
+
+
+      {{domain}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: domain_q_v2
+    reference: ''
+  cb18c33c-44ae-43f6-856d-37644e425795: !Template
+    answer_choices: null
+    id: cb18c33c-44ae-43f6-856d-37644e425795
+    jinja: 'Context:
+
+      {{context}}
+
+
+      Answer the following question with extracts from the context: {{question}}
+
+
+      |||
+
+      {% if (answers["text"]  | length) == 0 %}
+
+      {{ "Unanswerable" }}
+
+      {% else %}
+
+      {{answers["text"] | join(" \n ")}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: answer_q_with_context_first
+    reference: ''
diff --git a/promptsource/templates/super_glue/axb/templates.yaml b/promptsource/templates/super_glue/axb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..56e3c144461162f2058981a32a34cbe6bfa38326
--- /dev/null
+++ b/promptsource/templates/super_glue/axb/templates.yaml
@@ -0,0 +1,125 @@
+dataset: super_glue
+subset: axb
+templates:
+  1ae41916-7b4d-4ef3-b414-bfadd95d67e2: !Template
+    answer_choices: Yes ||| No
+    id: 1ae41916-7b4d-4ef3-b414-bfadd95d67e2
+    jinja: 'Given {{sentence1}} Should we assume that "{{sentence2}}" is true? Yes
+      or no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: should assume
+    reference: Webson & Pavlick 2021
+  1b2d6e85-a5a9-4d1b-9e3b-630b490c6a34: !Template
+    answer_choices: Yes ||| No
+    id: 1b2d6e85-a5a9-4d1b-9e3b-630b490c6a34
+    jinja: '{{sentence1}} Are we justified in saying that "{{sentence2}}"? Yes or
+      no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: justified in saying
+    reference: Webson & Pavlick 2021
+  23651f68-93cc-441f-b826-30dd2c6d6a93: !Template
+    answer_choices: Yes ||| No
+    id: 23651f68-93cc-441f-b826-30dd2c6d6a93
+    jinja: Given that {{sentence1}} Does it follow that {{sentence2}} Yes or no? |||
+      {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does it follow that
+    reference: v0.1
+  552d6c20-ab5b-462f-b5fb-3c7b80c78dcc: !Template
+    answer_choices: Yes ||| No
+    id: 552d6c20-ab5b-462f-b5fb-3c7b80c78dcc
+    jinja: '{{sentence1}} Using only the above description and what you know about
+      the world, is "{{sentence2}}" definitely correct? Yes or no? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: MNLI crowdsource
+    reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+  908be561-caf4-4416-9fe9-9919c3998681: !Template
+    answer_choices: Yes ||| No
+    id: 908be561-caf4-4416-9fe9-9919c3998681
+    jinja: 'Given {{sentence1}} Is it guaranteed true that "{{sentence2}}"? Yes or
+      no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed true
+    reference: Webson & Pavlick 2021
+  bae54ef5-c3be-4862-bdd4-a559ed04eb31: !Template
+    answer_choices: Yes ||| No
+    id: bae54ef5-c3be-4862-bdd4-a559ed04eb31
+    jinja: 'Suppose {{sentence1}} Can we infer that "{{sentence2}}"? Yes or no? |||
+      {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: can we infer
+    reference: Webson & Pavlick 2021
+  c92d765f-83b1-4684-a0a3-580929b5e46b: !Template
+    answer_choices: Yes ||| No
+    id: c92d765f-83b1-4684-a0a3-580929b5e46b
+    jinja: "{{sentence1}} \n\nQuestion: Does this imply that \"{{sentence2}}\"? Yes\
+      \ or no? ||| {{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does this imply
+    reference: v0.1
+  cb68ee27-c0a3-440b-b595-e90fe89539c3: !Template
+    answer_choices: Yes ||| No
+    id: cb68ee27-c0a3-440b-b595-e90fe89539c3
+    jinja: 'Given that {{sentence1}} Therefore, it must be true that "{{sentence2}}"?
+      Yes or no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: must be true
+    reference: v0.1
+  d57550ef-2f67-46eb-98cb-432dd135be16: !Template
+    answer_choices: Yes ||| No
+    id: d57550ef-2f67-46eb-98cb-432dd135be16
+    jinja: '{{sentence1}} Based on the previous passage, is it true that "{{sentence2}}"?
+      Yes or no? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  d965164b-fa96-41b5-8852-e0f6dfe5524e: !Template
+    answer_choices: True ||| False
+    id: d965164b-fa96-41b5-8852-e0f6dfe5524e
+    jinja: '{{sentence1}}
+
+      Question: {{sentence2}} True or False? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: Same as reported in Figure G31 of the GPT-3 paper.
diff --git a/promptsource/templates/super_glue/axg/templates.yaml b/promptsource/templates/super_glue/axg/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3758d3c7f3ec359db42b18414f5012b068f4267d
--- /dev/null
+++ b/promptsource/templates/super_glue/axg/templates.yaml
@@ -0,0 +1,125 @@
+dataset: super_glue
+subset: axg
+templates:
+  0f530aa8-b254-4687-8032-bab1a65610c0: !Template
+    answer_choices: Yes ||| No
+    id: 0f530aa8-b254-4687-8032-bab1a65610c0
+    jinja: 'Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes
+      or no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: should assume
+    reference: Webson & Pavlick 2021
+  0f8afaef-19a0-472f-9e9f-c803426f8f22: !Template
+    answer_choices: Yes ||| No
+    id: 0f8afaef-19a0-472f-9e9f-c803426f8f22
+    jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes\
+      \ or no? ||| {{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does this imply
+    reference: v0.1
+  3b7a57e0-7733-4b21-9bed-a381fdc2415f: !Template
+    answer_choices: Yes ||| No
+    id: 3b7a57e0-7733-4b21-9bed-a381fdc2415f
+    jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+      Yes or no? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  4361cf07-1b58-478f-b97c-3b140832fb77: !Template
+    answer_choices: Yes ||| No
+    id: 4361cf07-1b58-478f-b97c-3b140832fb77
+    jinja: 'Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+      Yes or no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: must be true
+    reference: v0.1
+  626823f5-ff12-46d5-9e68-b2dc4bfe7cd4: !Template
+    answer_choices: True ||| False
+    id: 626823f5-ff12-46d5-9e68-b2dc4bfe7cd4
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True or False? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: Same as reported in Figure G31 of the GPT-3 paper.
+  7e1439f6-d54d-43e6-bdc7-306ad5fd9203: !Template
+    answer_choices: Yes ||| No
+    id: 7e1439f6-d54d-43e6-bdc7-306ad5fd9203
+    jinja: 'Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes or
+      no? ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed true
+    reference: Webson & Pavlick 2021
+  c008c778-7621-496e-baa3-7b5817400659: !Template
+    answer_choices: Yes ||| No
+    id: c008c778-7621-496e-baa3-7b5817400659
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes or no? |||
+      {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does it follow that
+    reference: v0.1
+  d4a1dd92-e184-4843-bc1f-1f625c833249: !Template
+    answer_choices: Yes ||| No
+    id: d4a1dd92-e184-4843-bc1f-1f625c833249
+    jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes or no?
+      ||| {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: justified in saying
+    reference: Webson & Pavlick 2021
+  db13469f-7161-4670-8a59-8c1137d1fa8b: !Template
+    answer_choices: Yes ||| No
+    id: db13469f-7161-4670-8a59-8c1137d1fa8b
+    jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes or no? |||
+      {{ answer_choices[label] }} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: can we infer
+    reference: Webson & Pavlick 2021
+  e21f5367-0cc8-412d-b8d9-78548438a384: !Template
+    answer_choices: Yes ||| No
+    id: e21f5367-0cc8-412d-b8d9-78548438a384
+    jinja: '{{premise}} Using only the above description and what you know about the
+      world, is "{{hypothesis}}" definitely correct? Yes or no? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: MNLI crowdsource
+    reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
diff --git a/promptsource/templates/super_glue/boolq/templates.yaml b/promptsource/templates/super_glue/boolq/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a7ae0745097fe024173f72c1e5032b61cff9372
--- /dev/null
+++ b/promptsource/templates/super_glue/boolq/templates.yaml
@@ -0,0 +1,170 @@
+dataset: super_glue
+subset: boolq
+templates:
+  3e386463-1715-4578-9cba-07d11a0d3b61: !Template
+    answer_choices: False ||| True
+    id: 3e386463-1715-4578-9cba-07d11a0d3b61
+    jinja: 'Passage: {{passage}}
+
+
+      After reading this passage, I have a question: {{question}}? True or False?
+      |||
+
+      {% if label != -1 %}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: after_reading
+    reference: ''
+  492f0f88-4370-46cd-839b-1de37a55aeda: !Template
+    answer_choices: No ||| Yes
+    id: 492f0f88-4370-46cd-839b-1de37a55aeda
+    jinja: "{{ passage }} \nQuestion: {{ question }}\nAnswer: ||| \n{% if label !=\
+      \ -1 %}\n{{ answer_choices[label] }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 Style
+    reference: Same as Figure G29, p. 58 of the GPT-3 paper
+  6cb6a026-c070-470a-b75d-bb8fdf424e35: !Template
+    answer_choices: No ||| Yes
+    id: 6cb6a026-c070-470a-b75d-bb8fdf424e35
+    jinja: "{{ passage }} \n\nHaving read that, I wonder {{ question }}? |||\n{% if\
+      \ label != -1 %}\n{{ answer_choices[label] }} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "I wonder\u2026"
+    reference: ''
+  7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5: !Template
+    answer_choices: No ||| Yes
+    id: 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5
+    jinja: 'Text: {{passage}}
+
+
+      Answer the following yes/no question: {{question}}? Yes or no? |||
+
+      {% if label != -1 %}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: yes_no_question
+    reference: ''
+  7d21d974-0624-4d4f-9e8c-644e2d009cb5: !Template
+    answer_choices: No ||| Yes
+    id: 7d21d974-0624-4d4f-9e8c-644e2d009cb5
+    jinja: "{{ passage }} \n\nHaving read that, could you tell me {{ question }}?\
+      \ ||| {% if label != -1 %}{{ answer_choices[label] }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "could you tell me\u2026"
+    reference: ''
+  922d3e87-ac58-4731-84d1-f0a40e47afb5: !Template
+    answer_choices: No ||| Yes
+    id: 922d3e87-ac58-4731-84d1-f0a40e47afb5
+    jinja: "EXAM\n1. Answer by yes or no.\n\nDocument: {{passage}}\nQuestion: {{question}}?\
+      \ ||| \n{% if label != -1 %}\n{{answer_choices[label]}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: exam
+    reference: ''
+  9a1bf459-8047-437c-9def-f21e960429cc: !Template
+    answer_choices: No ||| Yes
+    id: 9a1bf459-8047-437c-9def-f21e960429cc
+    jinja: 'Based on the following passage, {{ question }}? {{ passage }}
+
+
+      |||
+
+      {% if label != -1 %}
+
+      {{ answer_choices[label] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the following passage
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  9f4c6b0a-437b-40c0-b467-db4b7218d38d: !Template
+    answer_choices: False ||| True
+    id: 9f4c6b0a-437b-40c0-b467-db4b7218d38d
+    jinja: 'Exercise: read the text and answer the question with True or False.
+
+
+      Text: {{passage}}
+
+      Question: {{question}}? |||
+
+      {% if label != -1 %}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: exercise
+    reference: ''
+  b2b3cb60-d6e3-491c-a09a-8201e13e417e: !Template
+    answer_choices: No ||| Yes
+    id: b2b3cb60-d6e3-491c-a09a-8201e13e417e
+    jinja: '{{ passage }}
+
+      Based on the previous passage, {{ question }}? ||| {% if label != -1 %}{{ answer_choices[label]
+      }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  eb78772c-e81e-4b8a-a77b-b75efd1c212a: !Template
+    answer_choices: False ||| True
+    id: eb78772c-e81e-4b8a-a77b-b75efd1c212a
+    jinja: '{{passage}}
+
+
+      Q: {{question}}? True or False? |||
+
+      {% if label != -1 %}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: valid_binary
+    reference: ''
diff --git a/promptsource/templates/super_glue/cb/templates.yaml b/promptsource/templates/super_glue/cb/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..63a4ba5bf9b55703d6c23fbc09c3c923e7f4ea1c
--- /dev/null
+++ b/promptsource/templates/super_glue/cb/templates.yaml
@@ -0,0 +1,196 @@
+dataset: super_glue
+subset: cb
+templates:
+  2e76cd0f-68ca-4f03-83ed-11cf15b25a84: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: 2e76cd0f-68ca-4f03-83ed-11cf15b25a84
+    jinja: 'Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes, no, or maybe?
+      ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: can we infer
+    reference: Webson & Pavlick 2021
+  358860fd-61ad-45fd-92a6-a72ca9107ebc: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: 358860fd-61ad-45fd-92a6-a72ca9107ebc
+    jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+      Yes, no, or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  3f43a599-ffdb-490e-8952-c0ce41dd4621: !Template
+    answer_choices: True ||| False ||| Inconclusive
+    id: 3f43a599-ffdb-490e-8952-c0ce41dd4621
+    jinja: '{{premise}} Based on that information, is the claim: "{{hypothesis}}"
+      {{"true"}}, {{"false"}}, or {{"inconclusive"}}? ||| {% if label !=-1 %}{{ answer_choices[label]
+      }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: claim true/false/inconclusive
+    reference: Bers et al.
+  404eed25-558a-4d39-9515-7de46d60d4e0: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: 404eed25-558a-4d39-9515-7de46d60d4e0
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+      ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does it follow that
+    reference: v0.1
+  5c9b1fa9-93f0-4f82-b9e3-e0967e4d7260: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: 5c9b1fa9-93f0-4f82-b9e3-e0967e4d7260
+    jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes, no,
+      or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: justified in saying
+    reference: Webson & Pavlick 2021
+  6b0c6191-183d-4731-8050-ab17c909335c: !Template
+    answer_choices: Always ||| Never ||| Sometimes
+    id: 6b0c6191-183d-4731-8050-ab17c909335c
+    jinja: Suppose it's true that {{premise}} Then, is "{{hypothesis}}" {{"always"}},
+      {{"sometimes"}}, or {{"never"}} true? ||| {% if label !=-1 %}{{ answer_choices[label]
+      }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: always/sometimes/never
+    reference: Bers et al.
+  75db2bc2-3caa-4956-9653-13c7dd6255df: !Template
+    answer_choices: True ||| False ||| Neither
+    id: 75db2bc2-3caa-4956-9653-13c7dd6255df
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True, False, or Neither? ||| {% if label !=-1 %}{{
+      answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: 'Same as reported in Figure G7 of the GPT-3 paper, except that there
+      are no task-identifying tokens like "anli R1: ".'
+  87237a07-7cce-470a-80ac-3e5e3a5283ba: !Template
+    answer_choices: Always ||| Never ||| Sometimes
+    id: 87237a07-7cce-470a-80ac-3e5e3a5283ba
+    jinja: "{{premise}} \n\nKeeping in mind the above text, consider: {{hypothesis}}\
+      \ Is this {{\"always\"}}, {{\"sometimes\"}}, or {{\"never\"}} correct? ||| {%\
+      \ if label !=-1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: consider always/sometimes/never
+    reference: Bers et al.
+  8798b8a4-1f59-4c72-9c1b-3e3044a7462a: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: 8798b8a4-1f59-4c72-9c1b-3e3044a7462a
+    jinja: Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes, no,
+      or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed true
+    reference: Webson & Pavlick 2021
+  8e3b8d3d-1362-47dc-922a-82c03f965989: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: 8e3b8d3d-1362-47dc-922a-82c03f965989
+    jinja: Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+      Yes, no, or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif
+      %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: must be true
+    reference: v0.1
+  90ab1002-093c-4e54-b48f-626655e36b65: !Template
+    answer_choices: Guaranteed ||| Impossible ||| Possible
+    id: 90ab1002-093c-4e54-b48f-626655e36b65
+    jinja: "Assume it is true that {{premise}} \n\nTherefore, \"{{hypothesis}}\" is\
+      \ {{\"guaranteed\"}}, {{\"possible\"}}, or {{\"impossible\"}}? ||| {% if label\
+      \ !=-1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed/possible/impossible
+    reference: Bers et al.
+  a485d120-6eef-4ff6-8684-42df1639b101: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: a485d120-6eef-4ff6-8684-42df1639b101
+    jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes,\
+      \ no, or maybe? ||| {% if label !=-1 %}{{answer_choices[label]}}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does this imply
+    reference: v0.1
+  bee62bfa-5307-4e1c-97b2-2ad2f7bcb179: !Template
+    answer_choices: Correct ||| Incorrect ||| Inconclusive
+    id: bee62bfa-5307-4e1c-97b2-2ad2f7bcb179
+    jinja: '{{premise}} Using only the above description and what you know about the
+      world, "{{hypothesis}}" is definitely correct, incorrect, or inconclusive? |||
+      {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: MNLI crowdsource
+    reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+  e503b148-8e6c-43b5-9ed6-312794c54d9b: !Template
+    answer_choices: Yes ||| No ||| Maybe
+    id: e503b148-8e6c-43b5-9ed6-312794c54d9b
+    jinja: Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes,
+      no, or maybe? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: should assume
+    reference: Webson & Pavlick 2021
+  ea56b7f3-6e07-45bc-b619-c527eac4a41b: !Template
+    answer_choices: True ||| False ||| Inconclusive
+    id: ea56b7f3-6e07-45bc-b619-c527eac4a41b
+    jinja: 'Take the following as truth: {{premise}}
+
+      Then the following statement: "{{hypothesis}}" is {{"true"}}, {{"false"}}, or
+      {{"inconclusive"}}? ||| {% if label !=-1 %}{{ answer_choices[label] }}{% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: take the following as truth
+    reference: Bers et al.
diff --git a/promptsource/templates/super_glue/copa/templates.yaml b/promptsource/templates/super_glue/copa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23083492b74080823fc5a6f6914643a9f6b2b475
--- /dev/null
+++ b/promptsource/templates/super_glue/copa/templates.yaml
@@ -0,0 +1,187 @@
+dataset: super_glue
+subset: copa
+templates:
+  0edd8660-f299-4819-a5ac-633c11177228: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 0edd8660-f299-4819-a5ac-633c11177228
+    jinja: 'Exercise: choose the most plausible alternative.
+
+
+      {{ premise }} {% if question == "cause" %} because... {% else %} so... {% endif
+      %}
+
+      - {{choice1}}
+
+      - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: exercise
+    reference: ''
+  150789fe-e309-47a1-82c9-0a4dc2c6b12b: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 150789fe-e309-47a1-82c9-0a4dc2c6b12b
+    jinja: "{% if question == \"effect\" %} \n{{ premise }} What could happen next,\
+      \ \"{{ answer_choices[0] }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label\
+      \ != -1 %}{{ answer_choices[label] }}{%endif%}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "\u2026What could happen next, C1 or C2?"
+    reference: ''
+  4d879cbe-2fd7-424a-9d78-3f5200313fba: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 4d879cbe-2fd7-424a-9d78-3f5200313fba
+    jinja: "{{ premise }} \n\nI am hesitating between two options. Help me choose\
+      \ the more likely {% if question == \"cause\" %} cause: {% else %} effect: {%\
+      \ endif %}\n- {{choice1}}\n- {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label]\
+      \ }}{%endif%}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: i_am_hesitating
+    reference: ''
+  66ea075e-4d03-4a78-b1fa-9a5228cf0c9d: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 66ea075e-4d03-4a78-b1fa-9a5228cf0c9d
+    jinja: '{{ premise }} {% if question == "cause" %} This happened because... {%
+      else %} As a consequence... {% endif %}
+
+      Help me pick the more plausible option:
+
+      - {{choice1}}
+
+      - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: plausible_alternatives
+    reference: ''
+  744047dc-1298-45a2-8d68-d67e3f834ded: !Template
+    answer_choices: '{{choice1 }} ||| {{choice2}}'
+    id: 744047dc-1298-45a2-8d68-d67e3f834ded
+    jinja: '"{{ answer_choices[0] }}" or "{{ answer_choices[1] }}"? {{ premise }}
+      {% if question == "cause" %} because {% else %} so {% endif %} ||| {% if label
+      != -1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "C1 or C2? premise, so/because\u2026"
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  84da62c2-9440-4cfc-bdd4-d70c65e33a82: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 84da62c2-9440-4cfc-bdd4-d70c65e33a82
+    jinja: "{% if question == \"effect\" %} \n{{ premise }} As a result, \"{{ answer_choices[0]\
+      \ }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label != -1 %}{{ answer_choices[label]\
+      \ }}{%endif%}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "\u2026As a result, C1 or C2?"
+    reference: ''
+  8ce80f8a-239e-4393-892c-f63dbb0d9929: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 8ce80f8a-239e-4393-892c-f63dbb0d9929
+    jinja: "{{ premise }} \n\nWhat's the best option?\n- {{choice1}}\n- {{choice2}}\n\
+      \nWe are looking for {% if question == \"cause\" %} a cause {% else %} an effect\
+      \ {% endif %}\n||| {% if label != -1 %}{{answer_choices[label]}}{%endif%}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: best_option
+    reference: ''
+  8cf2ba73-aee5-4651-b5d4-b1b88afe4abb: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: 8cf2ba73-aee5-4651-b5d4-b1b88afe4abb
+    jinja: "{% if question == \"cause\" %} \n{{ premise }} Which may be caused by\
+      \ \"{{ answer_choices[0] }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label\
+      \ != -1 %}{{ answer_choices[label] }}{%endif%}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "\u2026which may be caused by"
+    reference: ''
+  a1f9951e-2b6b-4530-9636-9cdf4c1658c5: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: a1f9951e-2b6b-4530-9636-9cdf4c1658c5
+    jinja: 'Pick the more likely continuation to the following sentence:
+
+      {{ premise }} {% if question == "cause" %} as a result of: {% else %} as a consequence:
+      {% endif %}
+
+      - {{choice1}}
+
+      - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: more likely
+    reference: ''
+  a61d8c21-da25-47bf-b5fe-14a8edd650af: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: a61d8c21-da25-47bf-b5fe-14a8edd650af
+    jinja: '{{ premise }}
+
+
+      Select the most plausible {% if question == "cause" %} cause: {% else %} effect:
+      {% endif %}
+
+      - {{choice1}}
+
+      - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: cause_effect
+    reference: ''
+  a8bf11c3-bea2-45ba-a533-957d8bee5e2e: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: a8bf11c3-bea2-45ba-a533-957d8bee5e2e
+    jinja: "{% if question == \"cause\" %} \n{{ premise }} Why? \"{{ answer_choices[0]\
+      \ }}\" or \"{{ answer_choices[1] }}\"? ||| {% if label != -1 %}{{ answer_choices[label]\
+      \ }}{%endif%}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "\u2026why? C1 or C2"
+    reference: ''
+  f32348cd-d3cb-4619-87b9-e24f99c78567: !Template
+    answer_choices: '{{choice1}} ||| {{choice2}}'
+    id: f32348cd-d3cb-4619-87b9-e24f99c78567
+    jinja: '{{ premise }} {% if question == "cause" %} because... {% else %} so...
+      {% endif %}
+
+      Choose between:
+
+      - {{choice1}}
+
+      - {{choice2}} ||| {% if label != -1 %}{{ answer_choices[label] }}{%endif%}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose
+    reference: ''
diff --git a/promptsource/templates/super_glue/multirc/templates.yaml b/promptsource/templates/super_glue/multirc/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..61bd6a954784de58c21ae22bc8d8d5eebb97d8bf
--- /dev/null
+++ b/promptsource/templates/super_glue/multirc/templates.yaml
@@ -0,0 +1,165 @@
+dataset: super_glue
+subset: multirc
+templates:
+  2d95962b-a545-41ae-8d76-07ee6704ef65: !Template
+    answer_choices: No ||| Yes
+    id: 2d95962b-a545-41ae-8d76-07ee6704ef65
+    jinja: '{{paragraph}}
+
+
+      Question: {{question}}
+
+      I found this answer "{{answer}}". Is that correct? Yes or no?
+
+      |||
+
+      {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: found_this_answer
+    reference: ''
+  42d47df9-09de-4691-8e49-7cfadd636cdd: !Template
+    answer_choices: No ||| Yes
+    id: 42d47df9-09de-4691-8e49-7cfadd636cdd
+    jinja: "{{ paragraph }}\nBased on the previous passage, {{ question }} \nIs \"\
+      {{ answer }}\" a correct answer? ||| {% if label != -1 %}{{ answer_choices[label]\
+      \ }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "is\u2026 a correct answer?"
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  431a5c97-af33-4053-83c8-afb0dfc04448: !Template
+    answer_choices: No ||| Yes
+    id: 431a5c97-af33-4053-83c8-afb0dfc04448
+    jinja: '{{paragraph}}
+
+      Question: {{question}}
+
+
+      I am grading my students'' exercises. Is the answer "{{answer}}" correct?
+
+      |||
+
+      {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: grading
+    reference: ''
+  4fc9e1ea-7451-4dba-a2cb-ce870e35ef8b: !Template
+    answer_choices: No ||| Yes
+    id: 4fc9e1ea-7451-4dba-a2cb-ce870e35ef8b
+    jinja: "{{ paragraph }}\n{{ question }} \nWould it be good to answer \"{{ answer\
+      \ }}\"? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "Would it be good to answer\u2026"
+    reference: ''
+  59a2d847-27f3-4002-a125-cf9a291b3098: !Template
+    answer_choices: No ||| Yes
+    id: 59a2d847-27f3-4002-a125-cf9a291b3098
+    jinja: "{{ paragraph }}\nQuestion: {{ question }} \nIs it {{ answer }}? ||| {%\
+      \ if label != -1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "paragraph\u2026 question\u2026 is it\u2026 ?"
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  7bf537ea-ff8d-44c7-8fc9-305b35e3be66: !Template
+    answer_choices: No ||| Yes
+    id: 7bf537ea-ff8d-44c7-8fc9-305b35e3be66
+    jinja: '{{paragraph}}
+
+
+      Decide whether "{{answer}}" is a valid answer to the following question: {{question}}
+
+      Answer yes or no.
+
+      |||
+
+      {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: decide_valid
+    reference: ''
+  7d878b89-2774-429a-82fb-ac801379e3ae: !Template
+    answer_choices: No ||| Yes
+    id: 7d878b89-2774-429a-82fb-ac801379e3ae
+    jinja: "{{ paragraph }}\nQuestion: {{ question }} \nIs the correct answer {{ answer\
+      \ }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "is the correct answer\u2026"
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  ae9b2b0b-1731-4370-adcc-36c4a959490d: !Template
+    answer_choices: No ||| Yes
+    id: ae9b2b0b-1731-4370-adcc-36c4a959490d
+    jinja: 'Is "{{answer}}" a correct answer to the following question?
+
+      Question: {{question}}
+
+
+      Rely on the following text: {{paragraph}}
+
+      |||
+
+      {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: correct
+    reference: ''
+  b63fd1c3-b4a6-43c3-8429-6a389235b2a4: !Template
+    answer_choices: No ||| Yes
+    id: b63fd1c3-b4a6-43c3-8429-6a389235b2a4
+    jinja: '{{paragraph}}
+
+
+      Question: {{question}}
+
+      I think "{{answer}}" is a valid answer. Could you confirm? Yes or no?
+
+      |||
+
+      {% if label != -1 %}{{answer_choices[label]}}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: confirm
+    reference: ''
+  d2d78b88-8845-45b5-935a-6451da00b285: !Template
+    answer_choices: No ||| Yes
+    id: d2d78b88-8845-45b5-935a-6451da00b285
+    jinja: "{{ paragraph }}\n{{ question }} \nI was going to say \"{{ answer }}\"\
+      . Does that sound right? ||| {% if label != -1 %}{{ answer_choices[label] }}{%\
+      \ endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: "I was going to say\u2026"
+    reference: ''
diff --git a/promptsource/templates/super_glue/record/templates.yaml b/promptsource/templates/super_glue/record/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d2511f8af50841bcaa9dc052d3302bf30fac6f8
--- /dev/null
+++ b/promptsource/templates/super_glue/record/templates.yaml
@@ -0,0 +1,329 @@
+dataset: super_glue
+subset: record
+templates:
+  014b669e-2e3b-40ce-bdde-418966c7d666: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: 014b669e-2e3b-40ce-bdde-418966c7d666
+    jinja: "{{ passage }} \n{{ query }} \nWhich one is the \"{{\"@placeholder\"}}\"\
+      ? {{ entities | join(\", \") }}? ||| {% if ( answers | length ) > 0 %} {{ answers\
+      \ | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Squad
+      original_task: true
+    name: Which one is the placeholder?
+    reference: ''
+  11e27d59-b1f5-43a1-9ccc-17f1c3249173: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: 11e27d59-b1f5-43a1-9ccc-17f1c3249173
+    jinja: "The following document has been corrupted. Tell me what \"{{\"@placeholder\"\
+      }}\" is referring to.\n\nDocument: {{ passage }} \n{{ query }} \n||| {% if (\
+      \ answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: corrupted
+    reference: ''
+  147656b2-2dad-4028-96c4-f19d57cd1344: !Template
+    answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: 147656b2-2dad-4028-96c4-f19d57cd1344
+    jinja: "Summary:\n\n- {{ passage.split(\"@highlight\")[1:] | join(\"\\n- \") }}\
+      \ \n\nArticle:\n\n{{ passage.split(\"@highlight\")[0] }}\n ||| {% if ( answers\
+      \ | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice) }}\
+      \ {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Summary first (continuation choices)
+    reference: ''
+  24c267d4-359e-40a9-83d2-bff904d63b09: !Template
+    answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: 24c267d4-359e-40a9-83d2-bff904d63b09
+    jinja: "Summary:\n\n- {{ passage.split(\"@highlight\")[1:] | join(\"\\n- \") }}\
+      \ \n\nArticle:\n\n{{ passage.split(\"@highlight\")[0] }}\n\nNow that you've\
+      \ read the article, please write a new sentence to add to it.\n\n||| {% if (\
+      \ answers | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice)\
+      \ }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Add sentence after after (continuation choices)
+    reference: ''
+  441c70e3-095a-44a1-8163-bc3b666b7ea1: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: 441c70e3-095a-44a1-8163-bc3b666b7ea1
+    jinja: "{{ passage }} \n{{ query }} \n\nYou should decide what \"{{\"@placeholder\"\
+      }}\" is referring to. Choose between:\n- {{answer_choices | join(\"\\n- \")}}\n\
+      ||| {% if ( answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Squad
+      original_task: true
+    name: choose_between
+    reference: ''
+  64013fb3-1afd-4e5a-8777-b164ca3b8e18: !Template
+    answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: 64013fb3-1afd-4e5a-8777-b164ca3b8e18
+    jinja: "{{ passage.split(\"@highlight\")[0] }}\n\nSummary:\n\n- {{ passage.split(\"\
+      @highlight\")[1:] | join(\"\\n- \") }} \n\n ||| {% if ( answers | length ) >\
+      \ 0 %}- {{ query | replace(\"@placeholder\", answers | choice) }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style summary only (continuation choices)
+    reference: Brown et al. 2020
+  90fc9ecb-c706-4c03-bb7e-4fe9fcd777f6: !Template
+    answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: 90fc9ecb-c706-4c03-bb7e-4fe9fcd777f6
+    jinja: "Article:\n\n{{ passage.split(\"@highlight\")[0] }}\n\nHighlights:\n\n\
+      {{ passage.split(\"@highlight\")[1:] | join(\"\\n\") }} \n\n ||| {% if ( answers\
+      \ | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice) }}\
+      \ {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: GPT-3 style with labels without hyphens (continuation choices)
+    reference: ''
+  91555c1c-c1e4-469b-a2a4-fc952ce1a145: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: 91555c1c-c1e4-469b-a2a4-fc952ce1a145
+    jinja: "{{ passage }} \n{{ query }} \nIn the question above, the \"{{\"@placeholder\"\
+      }}\" stands for ||| {% if ( answers | length ) > 0 %}{{ answers | choice }}{%\
+      \ endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: In the question above, the placeholder stands for
+    reference: ''
+  94577b75-2eac-4eae-b367-3b413c4188c6: !Template
+    answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: 94577b75-2eac-4eae-b367-3b413c4188c6
+    jinja: 'After reading the article, write another sentence to add to it.
+
+      {{ passage | replace("@highlight", "\n- ") }}
+
+
+      ||| {% if ( answers | length ) > 0 %}{{ query | replace("@placeholder", answers
+      | choice) }}{% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Add sentence after (continuation choices)
+    reference: ''
+  9579b54e-4f0f-4e43-8907-af57112cc857: !Template
+    answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: 9579b54e-4f0f-4e43-8907-af57112cc857
+    jinja: "Please read the following news article and write another sentence to add\
+      \ to it.\n\n{{ passage | replace(\"@highlight\", \"\\n- \") }} \n ||| {% if\
+      \ ( answers | length ) > 0 %}{{ query | replace(\"@placeholder\", answers |\
+      \ choice) }} {% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: News article (continuation choices)
+    reference: ''
+  99dd38ce-32f3-4d58-93c5-59821002b9cc: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: 99dd38ce-32f3-4d58-93c5-59821002b9cc
+    jinja: "{{ passage }} \n{{ query }} \nWhat could the \"{{\"@placeholder\"}}\"\
+      \ be? {{ entities | join(\", \") }}? ||| {% if ( answers | length ) > 0 %}{{\
+      \ answers | choice }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Squad
+      original_task: true
+    name: What could the placeholder be?
+    reference: ''
+  9b688cf3-28bf-4f33-94cf-e73e4fa8c608: !Template
+    answer_choices: '{{entities | join("|||")}}'
+    id: 9b688cf3-28bf-4f33-94cf-e73e4fa8c608
+    jinja: '{{ passage }}
+
+      {{ query }}
+
+
+      I am trying to decide what "{{"@placeholder"}}" means in the previous text.
+
+      Help by choosing an option between:
+
+      - {{ entities | join("\n- ") }}
+
+      ||| {% if ( answers | length ) > 0 %}
+
+      {{ answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Squad
+      original_task: true
+    name: trying_to_decide
+    reference: ''
+  a5ed27ed-162b-4ac1-9c7a-85059d5214be: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: a5ed27ed-162b-4ac1-9c7a-85059d5214be
+    jinja: "{{ passage }} \n{{ query }} \nHere, the placeholder refers to ||| {% if\
+      \ ( answers | length ) > 0 %}{{ answers | choice }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: "the placeholder refers to\u2026"
+    reference: ''
+  a99a92e0-e1ee-4ec3-a38a-3be4303ba017: !Template
+    answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: a99a92e0-e1ee-4ec3-a38a-3be4303ba017
+    jinja: "{{ passage.split(\"@highlight\")[0] }}\n\nHighlights:\n\n- {{ passage.split(\"\
+      @highlight\")[1:] | join(\"\\n- \") }} \n\nPlease write an additional highlight.\n\
+      \ ||| {% if ( answers | length ) > 0 %}- {{ query | replace(\"@placeholder\"\
+      , answers | choice) }} {% endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: New highlight (continuation choices)
+    reference: ''
+  d3fce74e-0d9d-404a-a009-9ebbf5794c2c: !Template
+    answer_choices: '{{entities | join("|||")}}'
+    id: d3fce74e-0d9d-404a-a009-9ebbf5794c2c
+    jinja: 'Exercise: Extract from the text the correct entity that "{{"@placeholder"}}"
+      is referring to.
+
+
+      {{ passage }}
+
+      {{ query }}
+
+      ||| {% if ( answers | length ) > 0 %}
+
+      {{ answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: exercise
+    reference: ''
+  de5b635e-c2f4-40bb-81ac-650f1b45564b: !Template
+    answer_choices: '{{entities | join("|||")}}'
+    id: de5b635e-c2f4-40bb-81ac-650f1b45564b
+    jinja: '{{ passage }}
+
+      {{ query }}
+
+
+      Pick one option, "{{"@placeholder"}}" refers to:
+
+      - {{answer_choices | join("\n- ")}}
+
+      ||| {% if ( answers | length ) > 0 %}
+
+      {{ answers | choice }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Squad
+      original_task: true
+    name: pick_one_option
+    reference: ''
+  df8d0822-2cad-42de-8191-687ae47f6098: !Template
+    answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: df8d0822-2cad-42de-8191-687ae47f6098
+    jinja: "{{ passage | replace(\"@highlight\", \"\\n- \") }} \n\n ||| {% if ( answers\
+      \ | length ) > 0 %}- {{ query | replace(\"@placeholder\", answers | choice)\
+      \ }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style (continuation choices)
+    reference: Brown et al. 2020
+  dfa3052f-ede8-42c2-b99a-bc5762c4fdc6: !Template
+    answer_choices: '{% for entity in entities[:-1] %} - {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} - {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: dfa3052f-ede8-42c2-b99a-bc5762c4fdc6
+    jinja: "Article:\n\n{{ passage.split(\"@highlight\")[0] }}\n\nHighlights:\n\n\
+      - {{ passage.split(\"@highlight\")[1:] | join(\"\\n- \") }} \n\n ||| {% if (\
+      \ answers | length ) > 0 %}- {{ query | replace(\"@placeholder\", answers |\
+      \ choice) }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: GPT-3 style with labels (continuation choices)
+    reference: Brown et al. 2020
+  e68d13c5-df75-4de0-b59e-f2eaf4af6ce7: !Template
+    answer_choices: '{{ entities | join("|||") }}'
+    id: e68d13c5-df75-4de0-b59e-f2eaf4af6ce7
+    jinja: "{{ passage }} \n{{ query }} \nCan you figure out what does the \"{{\"\
+      @placeholder\"}}\" mean? It means ||| {% if ( answers | length ) > 0 %}{{ answers\
+      \ | choice }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: "Can you figure out\u2026"
+    reference: ''
+  f7a92707-c531-42cb-81b4-063976e013cb: !Template
+    answer_choices: '{% for entity in entities[:-1] %} {{ query | replace("@placeholder",
+      entity) }} ||| {% endfor %} {{ query | replace("@placeholder", entities[-1])
+      }}'
+    id: f7a92707-c531-42cb-81b4-063976e013cb
+    jinja: "{{ passage | replace(\"@highlight\", \"\\n\") }} \n ||| {% if ( answers\
+      \ | length ) > 0 %}{{ query | replace(\"@placeholder\", answers | choice) }}\
+      \ {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style without hyphens (continuation choices)
+    reference: Brown et al. 2020
diff --git a/promptsource/templates/super_glue/rte/templates.yaml b/promptsource/templates/super_glue/rte/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..08613c7e30170c17e0bcfa0c9c3fc96336d12193
--- /dev/null
+++ b/promptsource/templates/super_glue/rte/templates.yaml
@@ -0,0 +1,126 @@
+dataset: super_glue
+subset: rte
+templates:
+  2b52a83c-0021-41fe-b44c-5aaa076d71a2: !Template
+    answer_choices: Yes ||| No
+    id: 2b52a83c-0021-41fe-b44c-5aaa076d71a2
+    jinja: '{{premise}} Using only the above description and what you know about the
+      world, is "{{hypothesis}}" definitely correct? Yes or no? ||| {% if label !=
+      -1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: MNLI crowdsource
+    reference: Adapted from Williams et al. 2018's instructions to crowdsourcing workers.
+  2d0d63da-ffcf-4f6e-941a-b8da922be43e: !Template
+    answer_choices: Yes ||| No
+    id: 2d0d63da-ffcf-4f6e-941a-b8da922be43e
+    jinja: Given {{premise}} Is it guaranteed true that "{{hypothesis}}"? Yes or no?
+      ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: guaranteed true
+    reference: Webson & Pavlick 2021
+  4163e6f1-1a83-4c73-b867-02eb7ac80316: !Template
+    answer_choices: Yes ||| No
+    id: 4163e6f1-1a83-4c73-b867-02eb7ac80316
+    jinja: Suppose {{premise}} Can we infer that "{{hypothesis}}"? Yes or no? |||
+      {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: can we infer
+    reference: Webson & Pavlick 2021
+  8fb1c6aa-20e9-438c-bece-c6af1c746449: !Template
+    answer_choices: True ||| False
+    id: 8fb1c6aa-20e9-438c-bece-c6af1c746449
+    jinja: '{{premise}}
+
+      Question: {{hypothesis}} True or False? ||| {% if label != -1 %}{{ answer_choices[label]
+      }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 style
+    reference: Same as reported in Figure G31 of the GPT-3 paper.
+  9e078fb4-505b-413c-bb5e-3cd16ddcf5d7: !Template
+    answer_choices: Yes ||| No
+    id: 9e078fb4-505b-413c-bb5e-3cd16ddcf5d7
+    jinja: "{{premise}} \n\nQuestion: Does this imply that \"{{hypothesis}}\"? Yes\
+      \ or no? ||| {% if label != -1 %}{{answer_choices[label]}}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does this imply
+    reference: v0.1
+  b8dc85c6-28b6-4340-979a-8e77c2a0dde8: !Template
+    answer_choices: Yes ||| No
+    id: b8dc85c6-28b6-4340-979a-8e77c2a0dde8
+    jinja: Given {{premise}} Should we assume that "{{hypothesis}}" is true? Yes or
+      no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: should assume
+    reference: Webson & Pavlick 2021
+  e2fb58f2-b1f2-4aef-b74b-c4ee1c571fff: !Template
+    answer_choices: Yes ||| No
+    id: e2fb58f2-b1f2-4aef-b74b-c4ee1c571fff
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes or no? |||
+      {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does it follow that
+    reference: v0.1
+  ed1f4b75-8826-4852-9bd6-aedf368678f5: !Template
+    answer_choices: Yes ||| No
+    id: ed1f4b75-8826-4852-9bd6-aedf368678f5
+    jinja: '{{premise}} Based on the previous passage, is it true that "{{hypothesis}}"?
+      Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  ee0ce095-122a-4509-bf0b-33d1495295f7: !Template
+    answer_choices: Yes ||| No
+    id: ee0ce095-122a-4509-bf0b-33d1495295f7
+    jinja: '{{premise}} Are we justified in saying that "{{hypothesis}}"? Yes or no?
+      ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: justified in saying
+    reference: Webson & Pavlick 2021
+  fb4f8144-37f5-4977-88da-37a5d0bfd0e8: !Template
+    answer_choices: Yes ||| No
+    id: fb4f8144-37f5-4977-88da-37a5d0bfd0e8
+    jinja: Given that {{premise}} Therefore, it must be true that "{{hypothesis}}"?
+      Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: must be true
+    reference: v0.1
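A convention used by every template in this file: the target is wrapped in `{% if label != -1 %}` because SuperGLUE test splits ship with `label == -1`, and without the guard `answer_choices[-1]` would silently render the last choice as a bogus gold target. A small sketch, assuming jinja2:

from jinja2 import Template

src = '{% if label != -1 %}{{ ["Yes", "No"][label] }}{% endif %}'
print(Template(src).render(label=1))   # "No"
print(Template(src).render(label=-1))  # "" -- unlabeled test example, empty target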
diff --git a/promptsource/templates/super_glue/wic/templates.yaml b/promptsource/templates/super_glue/wic/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..40e4c205f87541379ee69fdba0d0a3f65857d663
--- /dev/null
+++ b/promptsource/templates/super_glue/wic/templates.yaml
@@ -0,0 +1,218 @@
+dataset: super_glue
+subset: wic
+templates:
+  14e73f39-a0d1-44c2-b9a4-4e48f9f1608e: !Template
+    answer_choices: No ||| Yes
+    id: 14e73f39-a0d1-44c2-b9a4-4e48f9f1608e
+    jinja: 'Does the word "{{word}}" have the same meaning in these two sentences?
+      Yes, No?
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question-context-meaning-with-label
+    reference: Generalized question-context format with label
+  3503ead5-4fa5-4f77-95dc-f0c2ed3eecdc: !Template
+    answer_choices: No ||| Yes
+    id: 3503ead5-4fa5-4f77-95dc-f0c2ed3eecdc
+    jinja: 'Does the word "{{word}}" have the same meaning in these two sentences?
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question-context-meaning
+    reference: Generalized question-context format
+  611d13dc-d414-4b9b-9204-e4f325e859e7: !Template
+    answer_choices: No ||| Yes
+    id: 611d13dc-d414-4b9b-9204-e4f325e859e7
+    jinja: 'Homework
+
+
+      Decide whether the word "{{word}}" is used with the same meaning in the following
+      two sentences. Answer yes or no.
+
+      {{sentence1}}
+
+      {{sentence2}}
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: grammar_homework
+    reference: ''
+  725b5ed0-7728-4890-95a4-a74cb7ae1bb4: !Template
+    answer_choices: False ||| True
+    id: 725b5ed0-7728-4890-95a4-a74cb7ae1bb4
+    jinja: 'Sentence A: {{sentence1}}
+
+      Sentence B: {{sentence2}}
+
+
+      "{{word}}" has a similar meaning in sentences A and B. True or False?
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: affirmation_true_or_false
+    reference: ''
+  c3a0a5d8-cfe9-4a7f-8a3c-3c526e0ad0c6: !Template
+    answer_choices: No ||| Yes
+    id: c3a0a5d8-cfe9-4a7f-8a3c-3c526e0ad0c6
+    jinja: '{{sentence1}}
+
+      {{sentence2}}
+
+      Question: Is the word ''{{word}}'' used in the same sense in the two sentences
+      above?
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3-prompt
+    reference: Following table G32 of https://arxiv.org/pdf/2005.14165.pdf
+  ce8b5a93-1841-4897-84db-b100f1c84f4b: !Template
+    answer_choices: No ||| Yes
+    id: ce8b5a93-1841-4897-84db-b100f1c84f4b
+    jinja: 'Sentence 1: {{sentence1}}
+
+      Sentence 2: {{sentence2}}
+
+
+      Determine whether the word "{{word}}" is used in the same sense in both sentences.
+      Yes or no?
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: same_sense
+    reference: ''
+  cfbc1637-10b8-4f20-a31c-55292f3cebd0: !Template
+    answer_choices: No ||| Yes
+    id: cfbc1637-10b8-4f20-a31c-55292f3cebd0
+    jinja: "Determine if the word '{{word}}' is used in the same way in the two sentences\
+      \ below. \n{{sentence1}}\n{{sentence2}}\n||| {% if label != -1%}\n{{answer_choices[label]}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: question-context
+    reference: Generalized question-context format
+  d9e1db2a-ab0b-4621-bb41-01d5788d3873: !Template
+    answer_choices: No ||| Yes
+    id: d9e1db2a-ab0b-4621-bb41-01d5788d3873
+    jinja: '{{sentence1}}
+
+      {{sentence2}}
+
+      Question: Is the word ''{{word}}'' used in the same sense in the two sentences
+      above? Yes, No?
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3-prompt-with-label
+    reference: Following table G32 of https://arxiv.org/pdf/2005.14165.pdf, with
+      additional label options in the prompt.
+  dd2080cf-3117-49ba-9aff-c988a21fdb69: !Template
+    answer_choices: No ||| Yes
+    id: dd2080cf-3117-49ba-9aff-c988a21fdb69
+    jinja: 'The word "{{word}}" has multiple meanings. Does it have the same meaning
+      in sentences 1 and 2? Yes or no?
+
+
+      Sentence 1: {{sentence1}}
+
+      Sentence 2: {{sentence2}}
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: polysemous
+    reference: ''
+  f934a96d-fe4d-4075-aa47-5595b9a604c7: !Template
+    answer_choices: No ||| Yes
+    id: f934a96d-fe4d-4075-aa47-5595b9a604c7
+    jinja: '{{sentence1}}
+
+      {{sentence2}}
+
+      Similar sense of {{word}}?
+
+      ||| {% if label != -1%}
+
+      {{answer_choices[label]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: similar-sense
+    reference: Following https://arxiv.org/abs/2105.11447, https://github.com/ethanjperez/true_few_shot/tree/main/templates.super_glue
diff --git a/promptsource/templates/super_glue/wsc.fixed/templates.yaml b/promptsource/templates/super_glue/wsc.fixed/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30fb7ee347034f43bf1bb72ba54ad99cdde99e7b
--- /dev/null
+++ b/promptsource/templates/super_glue/wsc.fixed/templates.yaml
@@ -0,0 +1,135 @@
+dataset: super_glue
+subset: wsc.fixed
+templates:
+  212fb8b1-8436-4f64-8f37-a9094fe029f4: !Template
+    answer_choices: No ||| Yes
+    id: 212fb8b1-8436-4f64-8f37-a9094fe029f4
+    jinja: '{{ text }} In the previous sentence, does the pronoun "{{ span2_text.lower()
+      }}" refer to {{ span1_text }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label]
+      }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does the pronoun refer to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  23361c5d-b67f-4c2a-9da7-16301c55d0e1: !Template
+    answer_choices: No ||| Yes
+    id: 23361c5d-b67f-4c2a-9da7-16301c55d0e1
+    jinja: '{{ text }} Here, by "{{ span2_text }}" they mean "{{ span1_text }}". Yes
+      or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: by p they mean
+    reference: ''
+  2f17f18b-6daa-44ef-a2dd-dddaf04aec0e: !Template
+    answer_choices: False ||| True
+    id: 2f17f18b-6daa-44ef-a2dd-dddaf04aec0e
+    jinja: "{{ text }} \n\nIn other words, {{ text.split(\" \")[span2_index:] | join(\"\
+      \ \") | replace(span2_text, span1_text) }} True or false? ||| {% if label !=\
+      \ -1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: in other words
+    reference: ''
+  4b3e29cc-ccb8-4e4c-a845-4935ca29cf34: !Template
+    answer_choices: No ||| Yes
+    id: 4b3e29cc-ccb8-4e4c-a845-4935ca29cf34
+    jinja: '{{ text }} I think they mean "{{ text.split(" ")[span2_index:] | join("
+      ") | replace(span2_text, span1_text) }}" Yes or no? ||| {% if label != -1 %}{{
+      answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: I think they mean
+    reference: ''
+  7482d24f-cf45-4013-b82d-369489fc958b: !Template
+    answer_choices: No ||| Yes
+    id: 7482d24f-cf45-4013-b82d-369489fc958b
+    jinja: '{{ text }} Here, does "{{ span2_text.lower() }}" stand for {{ span1_text
+      }}? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does p stand for
+    reference: ''
+  7d377293-d043-4b6c-8ec1-d61eaf14ec67: !Template
+    answer_choices: No ||| Yes
+    id: 7d377293-d043-4b6c-8ec1-d61eaf14ec67
+    jinja: "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun\
+      \ \"{{ span2_text }}\" refer to {{ span1_text }}?\n\nAnswer: ||| {% if label\
+      \ != -1 %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 Style
+    reference: Adapted from Figure G33, p. 59, Brown et al. 2020
+  809eacd0-2f6c-4e3a-b52a-57c783879d36: !Template
+    answer_choices: No ||| Yes
+    id: 809eacd0-2f6c-4e3a-b52a-57c783879d36
+    jinja: '{{ text }} In the previous sentence, can the pronoun "{{ span2_text }}"
+      be replaced with "{{ span1_text }}"? Yes or no? ||| {% if label != -1 %}{{ answer_choices[label]
+      }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: replaced with
+    reference: ''
+  87f97aa0-1fa9-4f0b-b8e6-89d3c1f19bd6: !Template
+    answer_choices: False ||| True
+    id: 87f97aa0-1fa9-4f0b-b8e6-89d3c1f19bd6
+    jinja: "Context: {{ text }} \n\n{% if span2_text.lower()  == \"they\" or span2_text.lower()\
+      \ == \"them\" %}\nQuestion: \"{{ span2_text }}\" are {{ span1_text }}. True\
+      \ or false?\n{% else %}\nQuestion: \"{{ span2_text }}\" is {{ span1_text }}.\
+      \ True or false?\n{% endif %}\n\nAnswer: ||| {% if label != -1 %}{{ answer_choices[label]\
+      \ }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: p is/are r
+    reference: ''
+  aae24b54-c3a7-4f69-8b77-f6dc115988f8: !Template
+    answer_choices: False ||| True
+    id: aae24b54-c3a7-4f69-8b77-f6dc115988f8
+    jinja: "{{ text }} \nIn the passage above, the pronoun \"{{ span2_text }}\" refers\
+      \ to {{ span1_text }}. True or false? ||| {% if label != -1 %}{{ answer_choices[label]\
+      \ }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: the pronoun refers to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  d88f3e21-42dc-49a5-924d-69b764a14816: !Template
+    answer_choices: No ||| Yes
+    id: d88f3e21-42dc-49a5-924d-69b764a14816
+    jinja: "{{ text }} \n{% if span2_text.lower()  == \"they\" or span2_text.lower()\
+      \ == \"them\" %}\nQuestion: Who or what are \"{{ span2_text.lower() }}\"? {{\
+      \ span1_text }}?\n{% else %}\nQuestion: Who or what is \"{{ span2_text.lower()\
+      \ }}\"? Is it {{ span1_text }}?\n{% endif %}\nAnswer: ||| {% if label != -1\
+      \ %}{{ answer_choices[label] }}{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Who or what is/are
+    reference: I double-checked that the only plural pronouns in WSC are "they" and "them".
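The `in other words` and `I think they mean` templates above lean on wsc.fixed's `span2_index` being a whitespace-token offset: slicing the tokens from the pronoun onward and swapping in `span1_text` yields the paraphrased clause. A sketch of that expression with an illustrative WSC-style example (the field values here are made up):

text = ("Mark told Pete many lies about himself, which Pete included in his "
        "book. He should have been more skeptical.")
span1_text, span2_text, span2_index = "Pete", "He", 13

# Equivalent to: text.split(" ")[span2_index:] | join(" ") | replace(span2_text, span1_text)
clause = " ".join(text.split(" ")[span2_index:]).replace(span2_text, span1_text)
print(clause)  # Pete should have been more skeptical.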
diff --git a/promptsource/templates/swag/regular/templates.yaml b/promptsource/templates/swag/regular/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d757c369986f619a714f2e84830455de7258dc2
--- /dev/null
+++ b/promptsource/templates/swag/regular/templates.yaml
@@ -0,0 +1,105 @@
+dataset: swag
+subset: regular
+templates:
+  111e2684-cd6c-4808-97c1-e452941d7550: !Template
+    answer_choices: Yes ||| No
+    id: 111e2684-cd6c-4808-97c1-e452941d7550
+    jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the sentence: {{\
+      \ sent2 }} {{ [ending0, ending1, ending2, ending3][instance] }} \nIs it an appropriate\
+      \ continuation of the following situation:\n{{ sent1 }} ?\nYes or No?\n||| \n\
+      {% if label  == instance %}\n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: reversed_appropriate_continuation
+    reference: The template randomly selects a continuation and checks if the continuation
+      is appropriate for the given premise.
+  124f20bc-fea8-415f-8f09-5c2f8d077232: !Template
+    answer_choices: Yes ||| No
+    id: 124f20bc-fea8-415f-8f09-5c2f8d077232
+    jinja: "{% set instance = [0, 1, 2, 3] | choice %}\nConsider the sentence: {{\
+      \ sent1 }}\nIs the following an appropriate continuation?\n{{ sent2 }} {{ [ending0,\
+      \ ending1, ending2, ending3][instance] }}\nYes or No?\n||| \n{% if label  ==\
+      \ instance %}\n{{answer_choices[0]}}\n{% else %}\n{{answer_choices[1]}}\n{%\
+      \ endif %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: appropriate_continuation
+    reference: The template randomly selects an ending and checks whether it is
+      an appropriate continuation of the premise.
+  25b7abe7-e357-4e93-8c31-5f7be319b705: !Template
+    answer_choices: (a) ||| (b) ||| (c) ||| (d)
+    id: 25b7abe7-e357-4e93-8c31-5f7be319b705
+    jinja: "{{ startphrase }}...\nHow does the description likely end? \n(a): {{ ending0\
+      \ }}\n(b): {{ ending1 }}\n(c): {{ ending2 }}\n(d): {{ ending3 }}\n||| \n{{ answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: how_ends
+    reference: 'Predict the ending key based on the startphrase. Not original task
+      because sent1 is not included. '
+  66366555-f989-4e82-beca-2aaa92960a50: !Template
+    answer_choices: (a) ||| (b) ||| (c) ||| (d)
+    id: 66366555-f989-4e82-beca-2aaa92960a50
+    jinja: "First, {{ sent1.lower() }} Then, {{ sent2.lower() }}... \nComplete with\
+      \ an appropriate ending:\n(a) {{ ending0 }}\n(b) {{ ending1 }}\n(c) {{ ending2\
+      \ }}\n(d) {{ ending3 }}\n||| \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: first_then
+    reference: Generate the ending
+  d61be86a-242e-48ad-871c-f8be5133c4df: !Template
+    answer_choices: (a) ||| (b) ||| (c) ||| (d)
+    id: d61be86a-242e-48ad-871c-f8be5133c4df
+    jinja: "First, {{ sent1.lower() }} Then, {{ sent2.lower() }}... \nChoose the key\
+      \ with an appropriate ending:\n(a) {{ ending0 }}\n(b) {{ ending1 }}\n(c) {{\
+      \ ending2 }}\n(d) {{ ending3 }}\n||| \n{{answer_choices[label]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: first_then_key
+    reference: Predict the key of the ending
+  dae2999a-843e-445f-819e-9a3255cca049: !Template
+    answer_choices: null
+    id: dae2999a-843e-445f-819e-9a3255cca049
+    jinja: "{% set endings = [ending0, ending1, ending2, ending3] %}\nGenerate the\
+      \ starting sentence with the ending: {{endings[label]}}\n||| \n{{sent1}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: generate_start
+    reference: Template generates the start of the sentence
+  ecb7eddb-5836-4b31-89d6-e2d4ebfcc779: !Template
+    answer_choices: null
+    id: ecb7eddb-5836-4b31-89d6-e2d4ebfcc779
+    jinja: 'Complete the sentence: {{ sent1 }} {{sent2}}
+
+      |||
+
+      {% set endings = [ending0, ending1, ending2, ending3] %}
+
+      {{ endings[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: open_completion
+    reference: Template for open-ended common sense completion
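The two `appropriate_continuation` templates above recast the 4-way choice as a randomized yes/no task: the `choice` filter (promptsource maps it to random.choice) draws one ending per render, and the target says Yes only when the draw matches the gold label. A sketch, assuming jinja2 and that filter registration:

import random

from jinja2 import Environment

env = Environment()
env.filters["choice"] = random.choice

src = ("{% set instance = [0, 1, 2, 3] | choice %}"
       "{{ ['ending0', 'ending1', 'ending2', 'ending3'][instance] }} -> "
       "{% if label == instance %}Yes{% else %}No{% endif %}")
# Roughly one render in four pairs the gold ending with "Yes"; the rest are negatives.
print(env.from_string(src).render(label=2))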
diff --git a/promptsource/templates/tab_fact/tab_fact/templates.yaml b/promptsource/templates/tab_fact/tab_fact/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..36051eaaada2c4e91aeadb007a8e89a02d67c61b
--- /dev/null
+++ b/promptsource/templates/tab_fact/tab_fact/templates.yaml
@@ -0,0 +1,155 @@
+dataset: tab_fact
+subset: tab_fact
+templates:
+  137a6f5d-fdcd-4849-ba3c-7ae572285ef9: !Template
+    answer_choices: null
+    id: 137a6f5d-fdcd-4849-ba3c-7ae572285ef9
+    jinja: '{% if label %}
+
+      Passage: "{{statement}}"
+
+
+      Table: "{{table_text}}"
+
+
+      Note: {{"#"}} is the delimiter between columns; {{"\\n"}} is the delimiter between
+      rows.
+
+
+      Give a suitable caption for the table. Use the statement as a supporting document.
+      |||
+
+      {{table_caption}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: tab_fact_7
+    reference: Generate Table Caption
+  1f0606bd-0453-427f-8cc5-ab996aff680e: !Template
+    answer_choices: No ||| Yes
+    id: 1f0606bd-0453-427f-8cc5-ab996aff680e
+    jinja: 'table ==> passage?
+
+
+      table:
+
+      "{{table_caption}}"
+
+
+      {{table_text}}
+
+
+      Note: {{"#"}} is the delimiter between columns; {{"\\n"}} is the delimiter between
+      rows.
+
+
+      statement: "{{statement}}" |||
+
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: tab_fact_4
+    reference: 'Input: Table Caption, Table; Label: Yes/No (mathematical "implication"
+      symbol)'
+  33e3dbc2-3b1b-4891-8c78-2b575dd3ec35: !Template
+    answer_choices: refuted ||| entailed
+    id: 33e3dbc2-3b1b-4891-8c78-2b575dd3ec35
+    jinja: "Parse the following table:\n\nTable Caption: \"{{table_caption}}\"\n\n\
+      Table:\n\n{{table_text}}\n\nNote: {{\"#\"}} is the delimiter between columns;\
+      \ {{\"\\n\"}} is the delimiter between rows.\n\nFrom the above table, the statement\
+      \ \"{{statement}}\" can either be {{\"entailed\"}} or {{\"refuted\"}}. Which\
+      \ one is it? |||  \n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: tab_fact_1
+    reference: 'Input: Table Caption, Table; Label: Refuted/Entailed -- Affirmative
+      Form'
+  5bf642b0-4d75-40b7-9c0a-80b38a170d0f: !Template
+    answer_choices: null
+    id: 5bf642b0-4d75-40b7-9c0a-80b38a170d0f
+    jinja: "{% if label %}\nExpress any (part or subset of the table) / (inference\
+      \ obtained from the table) in plain English:\n\n\"{{table_caption}}\"\n\n\"\
+      {{table_text}}\" \n\nNote: {{\"#\"}} is the delimiter between columns; {{\"\\\
+      n\"}} is the delimiter between rows.\n|||\n{{statement}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: tab_fact_6
+    reference: Generate Natural Text from the table
+  6e4d3fe8-1d31-4685-8ef6-419ab8554741: !Template
+    answer_choices: No ||| Yes
+    id: 6e4d3fe8-1d31-4685-8ef6-419ab8554741
+    jinja: "Is \"{{statement}}\" corroborated by \"{{table_caption}}\", {{table_text}}\"\
+      ? \n\nNote: {{\"#\"}} is the delimiter between columns; {{\"\\n\"}} is the delimiter\
+      \ between rows.\n|||\n{{answer_choices[label]}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: tab_fact_5
+    reference: 'Input: Table Caption, Table; Label: Yes/No -- Interrogative Form (corroboration)'
+  becf68bd-726d-40c1-afb1-80afd461126c: !Template
+    answer_choices: No ||| Yes
+    id: becf68bd-726d-40c1-afb1-80afd461126c
+    jinja: 'I can''t make heads or tails of the given data. Can you help me with this?
+
+
+      I have the following paragraph: "{{statement}}". Is there any evidence of this
+      passage in the weird data below?
+
+
+      Topic: "{{table_caption}}"
+
+
+      {{table_text}}
+
+
+      Note: {{"#"}} is the delimiter between columns; {{"\\n"}} is the delimiter between
+      rows.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: tab_fact_3
+    reference: 'Input: Table Caption, Table; Label: Yes/No -- Interrogative Form'
+  faa6c21a-f52a-4eb9-a9e8-0931ea253229: !Template
+    answer_choices: refuted ||| entailed
+    id: faa6c21a-f52a-4eb9-a9e8-0931ea253229
+    jinja: 'Parse the following table:
+
+
+      Table Caption: "{{table_caption}}"
+
+
+      Table:
+
+
+      {{table_text}}
+
+
+      Note: {{"#"}} is the delimiter between columns; {{"\\n"}} is the delimiter between
+      rows.
+
+
+      From the above table, given two options ({{"refuted"}}/{{"entailed"}}), the
+      statement "{{statement}}" can definitely be not |||  {{answer_choices[1-label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: tab_fact_2
+    reference: 'Input: Table Caption, Table; Label: Refuted/Entailed (Negation) --
+      Affirmative Form'
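`tab_fact_2` is the one negated template here: with a binary label, `answer_choices[1-label]` flips 0 and 1, so the target names the option the statement definitely cannot be. In plain Python:

answer_choices = ["refuted", "entailed"]
for label in (0, 1):
    print(label, "->", answer_choices[1 - label])
# 0 -> entailed  (a refuted statement definitely cannot be entailed)
# 1 -> refuted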
diff --git a/promptsource/templates/tmu_gfm_dataset/templates.yaml b/promptsource/templates/tmu_gfm_dataset/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e157ffee7165751314f2bd0019f5a7bcf94f3caa
--- /dev/null
+++ b/promptsource/templates/tmu_gfm_dataset/templates.yaml
@@ -0,0 +1,143 @@
+dataset: tmu_gfm_dataset
+templates:
+  2b6a9c53-7cbc-4574-b5bd-448cf7960693: !Template
+    answer_choices: null
+    id: 2b6a9c53-7cbc-4574-b5bd-448cf7960693
+    jinja: 'Supposedly Sentence B is more natural than Sentence A. How much better
+      is it on a scale from 1 to 4?
+
+
+      Sentence A: {{source}}
+
+
+      Sentence B: {{output}}
+
+      |||
+
+      {{ (((10*ave_f) | round )/10) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: fluency
+    reference: ''
+  2b712291-0629-4499-86cc-566ee7376271: !Template
+    answer_choices: null
+    id: 2b712291-0629-4499-86cc-566ee7376271
+    jinja: 'Sentence B is grammatically better than Sentence A. How much better is
+      it on a scale from 0 to 4?
+
+
+      Sentence A: {{source}}
+
+
+      Sentence B: {{output}}
+
+      |||
+
+      {{ (((10*ave_g) | round )/10) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: grammar
+    reference: ''
+  30a17c4d-2bee-450c-b921-9b748ae87c93: !Template
+    answer_choices: null
+    id: 30a17c4d-2bee-450c-b921-9b748ae87c93
+    jinja: 'Grammatically improve the text below. Note that the original meaning
+      has to be preserved and the result should sound natural.
+
+
+      Text: {{source}}
+
+      |||
+
+      {{output}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: correct-sentence
+    reference: ''
+  9854074f-422e-47e4-bb49-e472dff76311: !Template
+    answer_choices: null
+    id: 9854074f-422e-47e4-bb49-e472dff76311
+    jinja: 'Sentence A was rewritten into Sentence B. Would you say that the original
+      meaning is well preserved? Please rate it on a scale from 0 to 4.
+
+
+      Sentence A: {{source}}
+
+
+      Sentence B: {{output}}
+
+      |||
+
+      {{ (((10*ave_m) | round )/10) }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: meaning
+    reference: ''
+  c8347303-bfcd-4fe5-b085-dee46045850c: !Template
+    answer_choices: null
+    id: c8347303-bfcd-4fe5-b085-dee46045850c
+    jinja: 'Read the two sentences below and answer the question.
+
+
+      Sentence A: {{source}}
+
+
+      Sentence B: {{output}}
+
+
+      Question: Sentence B is an improved version of Sentence A. How would you rate
+      the improvement on a scale from 0 to 4, with respect to grammaticality, fluency,
+      and meaning preservation, respectively?
+
+      |||
+
+      {{ (((10*ave_g) | round )/10) }}, {{ (((10*ave_f) | round )/10) }}, and {{ (((10*ave_m)
+      | round )/10) }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: grammar-fluency-meaning
+    reference: ''
+  ebb2956b-25eb-4a66-ba23-569ccf9b8675: !Template
+    answer_choices: null
+    id: ebb2956b-25eb-4a66-ba23-569ccf9b8675
+    jinja: 'Which one of the following two sentences is written better?
+
+      {% if range(0,2) | choice %}
+
+      Sentence A: {{source}}
+
+
+      Sentence B: {{output}}
+
+      |||
+
+      Sentence B
+
+      {% else %}
+
+      Sentence A: {{output}}
+
+
+      Sentence B: {{source}}
+
+      |||
+
+      Sentence A
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: choose-better
+    reference: ''
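The numeric targets above all use `(((10*ave_x) | round )/10)`, which rounds the crowd-sourced average to one decimal place by scaling up, rounding to an integer, and scaling back. Equivalent Python:

ave_f = 3.4567
print(round(10 * ave_f) / 10)  # 3.5, matching {{ (((10*ave_f) | round )/10) }}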
diff --git a/promptsource/templates/trec/templates.yaml b/promptsource/templates/trec/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a4aca1da659a4303166eb8685129c93938f181ed
--- /dev/null
+++ b/promptsource/templates/trec/templates.yaml
@@ -0,0 +1,385 @@
+dataset: trec
+templates:
+  21d04668-c5b3-4418-bbb6-663f1ffdb97c: !Template
+    answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+      ||| Location
+    id: 21d04668-c5b3-4418-bbb6-663f1ffdb97c
+    jinja: "Categories: {{', '.join(answer_choices)}}\n\nWhat category best describes:\
+      \ {{text}} \nAnswer: ||| {{ answer_choices [label_coarse] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: what_category_best_describe
+    reference: ''
+  2d4e0add-cfca-4f54-80a2-ddd8e91f9fd6: !Template
+    answer_choices: city ||| country ||| mountain ||| state ||| other location
+    id: 2d4e0add-cfca-4f54-80a2-ddd8e91f9fd6
+    jinja: '{% set label_mapping = {21:0, 18:1, 24:2, 11:3, 14:4} %}
+
+      {% if label_coarse == 5 %}
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_LOC
+    reference: Fine grained labels with coarse-label `LOC`, context after question
+  309bf243-2185-4090-ac66-a24f44d89966: !Template
+    answer_choices: code ||| count ||| date ||| distance ||| price ||| order ||| period
+      of time ||| percentage ||| speed ||| temperature ||| size ||| weight ||| other
+      number
+    id: 309bf243-2185-4090-ac66-a24f44d89966
+    jinja: '{% set label_mapping = {39:0, 13:1, 8:2, 40:3, 25:4, 43:5, 27:6, 38:7,
+      35:8, 41:9, 32:10, 45:11, 14:12} %}
+
+      {% if label_coarse == 4 %}
+
+      {{text}}
+
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_NUM_context_first
+    reference: Fine grained labels with coarse-label `NUM`, context provided first
+  3aff84f3-e478-4598-abe8-40aa24cec1fa: !Template
+    answer_choices: an animal ||| an organ of the body ||| a color ||| creative piece
+      ||| currency ||| disease or medicine ||| event ||| food ||| musical instrument
+      ||| language ||| letter ||| plant ||| product ||| religion ||| sport ||| substance
+      ||| symbol ||| technique ||| term ||| vehicle ||| word ||| other entity
+    id: 3aff84f3-e478-4598-abe8-40aa24cec1fa
+    jinja: '{% set label_mapping = {2:0, 22:1, 19:2, 1:3, 46:3, 23:4, 10:5, 17:6,
+      33:7, 37:8, 15:9, 30:10, 26:11, 16:12, 28:13, 42:14, 31:15, 20:16, 44:17, 36:18,
+      14:19} %}
+
+      {% if label_coarse == 1 %}
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_ENTY
+    reference: Fine grained labels with coarse-label `ENTY`, context after question
+  43a188a2-b6dd-46a7-af2e-81a64b90b92a: !Template
+    answer_choices: code ||| count ||| date ||| distance ||| price ||| order ||| period
+      of time ||| percentage ||| speed ||| temperature ||| size ||| weight ||| other
+      number
+    id: 43a188a2-b6dd-46a7-af2e-81a64b90b92a
+    jinja: '{% set label_mapping = {39:0, 13:1, 8:2, 40:3, 25:4, 43:5, 27:6, 38:7,
+      35:8, 41:9, 32:10, 45:11, 14:12} %}
+
+      {% if label_coarse == 4 %}
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_NUM
+    reference: Fine grained labels with coarse-label `NUM`
+  6c391f4f-027b-4425-88de-1dbb6aa706ee: !Template
+    answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+      ||| Location
+    id: 6c391f4f-027b-4425-88de-1dbb6aa706ee
+    jinja: 'Question: {{text}}
+
+
+      Descriptors: {{'', ''.join(answer_choices)}}
+
+
+      Best Descriptor?
+
+      |||
+
+      {{answer_choices[label_coarse]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: pick_the_best_descriptor
+    reference: ''
+  71090d59-dd02-4cbd-8032-ad86179b9bd4: !Template
+    answer_choices: Manner ||| Creative Piece ||| Animal ||| Expression abbreviated
+      ||| Individual ||| Group ||| Title ||| Definition ||| Date ||| Reason ||| Event
+      ||| State ||| Description ||| Count ||| Other ||| Letter ||| Religion ||| Food
+      ||| Country ||| Color ||| Term ||| City ||| Organ of the body ||| Disease or
+      medicine ||| Mountain ||| Price ||| Product ||| Period ||| Substance ||| Sport
+      ||| Plant ||| Technique ||| Size ||| Instrument ||| Abbreviation ||| Speed |||
+      Word ||| Language ||| Percentage ||| Code ||| Distance ||| Temperature ||| Symbol
+      ||| Order ||| Vehicle ||| Weight ||| Currency
+    id: 71090d59-dd02-4cbd-8032-ad86179b9bd4
+    jinja: '{{text}}
+
+
+      What is this question asking for?
+
+      |||
+
+      {{answer_choices[label_fine] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fine_grained_open_context_first
+    reference: Fine grained classes without providing choices, context first.
+  736b2629-ed57-48ce-a458-4cbc435c499b: !Template
+    answer_choices: city ||| country ||| mountain ||| state ||| other location
+    id: 736b2629-ed57-48ce-a458-4cbc435c499b
+    jinja: '{% set label_mapping = {21:0, 18:1, 24:2, 11:3, 14:4} %}
+
+      {% if label_coarse == 5 %}
+
+      {{text}}
+
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_LOC_context_first
+    reference: Fine grained labels with coarse-label `LOC`, context provided first
+  7a3ed4dd-af89-493c-8efb-c67622f63034: !Template
+    answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+      ||| Location
+    id: 7a3ed4dd-af89-493c-8efb-c67622f63034
+    jinja: "Which category best describes the following question: {{text}} \n\nChoose\
+      \ from the following list: \n{{', '.join(answer_choices)}}\n ||| {{ answer_choices\
+      \ [label_coarse] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: which_category_best_describes
+    reference: ''
+  7a9e6f3c-1dee-45b0-a315-1badaf59a7b8: !Template
+    answer_choices: definition ||| description ||| manner of action ||| reason
+    id: 7a9e6f3c-1dee-45b0-a315-1badaf59a7b8
+    jinja: '{% set label_mapping={0:2, 7:1,  12:0, 9:3} %}
+
+      {% if label_coarse == 0 %}
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices[label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_DESC
+    reference: Fine grained labels with coarse-label `DESC`, context after question
+  861d1a48-1113-4f35-b777-2b2f12ab9d5d: !Template
+    answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+      ||| Location
+    id: 861d1a48-1113-4f35-b777-2b2f12ab9d5d
+    jinja: '{{text}}
+
+
+      Is this asking about {{('', '').join(answer_choices)}}?
+
+      |||
+
+      {{ answer_choices [label_coarse] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: trec1
+    reference: Context then prompt
+  93a06e72-2c15-4f8a-a46c-6a10919c4ea4: !Template
+    answer_choices: abbreviation ||| expression abbreviated
+    id: 93a06e72-2c15-4f8a-a46c-6a10919c4ea4
+    jinja: "{% set label_mapping={34:0, 3:1} %} \n{% if label_coarse == 2 %}\nIs this\
+      \ question asking for an {{', '.join(answer_choices)}}?\n{{text}}\n|||\n{{answer_choices[label_mapping[label_fine]]\
+      \ }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_ABBR
+    reference: Fine grained labels with coarse-label `ABBR`, context after question
+  a0096044-3b4c-4c80-b139-25eac8fe692a: !Template
+    answer_choices: abbreviation ||| expression abbreviated
+    id: a0096044-3b4c-4c80-b139-25eac8fe692a
+    jinja: "{% set label_mapping = {34:0, 3:1} %} \n{% if label_coarse == 2 %}\n{{text}}\n\
+      \nIs this question asking for an {{', '.join(answer_choices)}}?\n|||\n{{ answer_choices\
+      \ [label_mapping[label_fine]] }}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_ABBR_context_first
+    reference: Fine grained labels with coarse-label `ABBR`, context provided first
+  aad2def1-b694-40ee-9c26-3d1cf5c577da: !Template
+    answer_choices: Description ||| Entity ||| Abbreviation ||| Person ||| Quantity
+      ||| Location
+    id: aad2def1-b694-40ee-9c26-3d1cf5c577da
+    jinja: 'Is the following question asking about {{'', ''.join(answer_choices)}}?
+
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices [label_coarse] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: trec2
+    reference: Prompt then context
+  bc58ba18-24a5-4553-be0a-2dba60efdad6: !Template
+    answer_choices: group ||| individual ||| title ||| description
+    id: bc58ba18-24a5-4553-be0a-2dba60efdad6
+    jinja: '{% set label_mapping = {5:0, 4:1, 6:2, 12:3} %}
+
+      {% if label_coarse == 3 %}
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices[label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_HUM
+    reference: Fine grained labels with coarse-label `HUM`, context after question
+  cfa8fde0-8320-4050-8d6e-7619ab14adea: !Template
+    answer_choices: Manner ||| Creative Piece ||| Animal ||| Expression abbreviated
+      ||| Individual ||| Group ||| Title ||| Definition ||| Date ||| Reason ||| Event
+      ||| State ||| Description ||| Count ||| Other ||| Letter ||| Religion ||| Food
+      ||| Country ||| Color ||| Term ||| City ||| Organ of the body ||| Disease or
+      medicine ||| Mountain ||| Price ||| Product ||| Period ||| Substance ||| Sport
+      ||| Plant ||| Technique ||| Size ||| Instrument ||| Abbreviation ||| Speed |||
+      Word ||| Language ||| Percentage ||| Code ||| Distance ||| Temperature ||| Symbol
+      ||| Order ||| Vehicle ||| Weight ||| Currency
+    id: cfa8fde0-8320-4050-8d6e-7619ab14adea
+    jinja: 'What is this question asking for?
+
+
+      {{text}}
+
+      |||
+
+      {{ answer_choices[label_fine] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fine_grained_open
+    reference: Fine grained classes without providing choices.
+  e98b9294-76b4-4172-a78c-9c6e5fdfe73b: !Template
+    answer_choices: group ||| individual ||| title ||| description
+    id: e98b9294-76b4-4172-a78c-9c6e5fdfe73b
+    jinja: '{% set label_mapping = {5:0, 4:1, 6:2, 12:3} %}
+
+      {% if label_coarse == 3 %}
+
+      {{text}}
+
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}{% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_HUM_context_first
+    reference: Fine grained labels with coarse-label `HUM`, context provided first
+  fa588c55-5c69-4fd0-a0b1-edbfa092f710: !Template
+    answer_choices: definition ||| description ||| manner of action ||| reason
+    id: fa588c55-5c69-4fd0-a0b1-edbfa092f710
+    jinja: '{% set label_mapping={0:2, 7:1,  12:0, 9:3} %}
+
+      {% if label_coarse == 0 %}
+
+      {{text}}
+
+
+      Is this question asking for {{'', ''.join(answer_choices)}}?
+
+      |||
+
+      {{ answer_choices [label_mapping[label_fine]] }}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: fine_grained_DESC_context_first
+    reference: Fine grained labels with coarse-label `DESC`, context provided first
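Each fine-grained TREC template hard-codes a `label_mapping` because `label_fine` uses dataset-wide class ids while `answer_choices` only lists the options of one coarse class; the dict remaps global ids onto local positions. A sketch for the HUM templates, reusing the ids from above:

answer_choices = [c.strip() for c in "group ||| individual ||| title ||| description".split("|||")]
label_mapping = {5: 0, 4: 1, 6: 2, 12: 3}  # dataset-wide fine ids -> local choice index

label_fine = 4
print(answer_choices[label_mapping[label_fine]])  # individual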
diff --git a/promptsource/templates/trivia_qa/unfiltered/templates.yaml b/promptsource/templates/trivia_qa/unfiltered/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c9e30906650305c078887c9277a50fc14fb5560e
--- /dev/null
+++ b/promptsource/templates/trivia_qa/unfiltered/templates.yaml
@@ -0,0 +1,69 @@
+dataset: trivia_qa
+subset: unfiltered
+templates:
+  5946db1a-a068-4a31-a06f-74a7d976cb6d: !Template
+    answer_choices: null
+    id: 5946db1a-a068-4a31-a06f-74a7d976cb6d
+    jinja: "{% if answer.aliases %} \n    Guess a question that has the answer \"\
+      {{answer.aliases|choice}}\" \n    |||  \n    {{question}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: guess_question
+    reference: Guess a question.
+  7ada9605-6fd1-49a9-a56e-6d778d4a0eb6: !Template
+    answer_choices: null
+    id: 7ada9605-6fd1-49a9-a56e-6d778d4a0eb6
+    jinja: "The goal is to predict an English answer string for an input English question.\
+      \ \nQuestion : {{question}}\nAnswer : \n||| \n{% if answer.aliases %} \n{{answer.aliases|choice}}\
+      \ \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: formal_description
+    reference: ''
+  91d9f950-a25a-4557-a16f-952d74629584: !Template
+    answer_choices: null
+    id: 91d9f950-a25a-4557-a16f-952d74629584
+    jinja: "Answer the following question.\n{{question}} \n|||\n{% if answer.aliases\
+      \ %} \n{{answer.aliases|choice}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question_with_instruction
+    reference: Instruction before question.
+  bfec3d73-c024-492f-8878-64fdb6639a29: !Template
+    answer_choices: null
+    id: bfec3d73-c024-492f-8878-64fdb6639a29
+    jinja: "I've always wondered: {{question}} \n||| \n{% if answer.aliases %} \n\
+      {{answer.aliases|choice}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: first_person_context
+    reference: Ask a question in first person
+  c29c7072-0535-4e38-ba0c-b7ac0acdacf8: !Template
+    answer_choices: null
+    id: c29c7072-0535-4e38-ba0c-b7ac0acdacf8
+    jinja: "Question : {{question}}\nAnswer : \n||| \n{% if answer.aliases %} \n{{answer.aliases|choice}}\n\
+      {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      - Other
+      original_task: true
+    name: question_answer
+    reference: Plain Question
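All of these templates guard on `answer.aliases` being non-empty and sample one alias as the target instead of committing to a single canonical answer string. In Python terms (the example answer dict is illustrative):

import random

answer = {"aliases": ["Washington, D.C.", "Washington DC", "D.C."]}
if answer["aliases"]:
    print(random.choice(answer["aliases"]))  # any one surface form is a valid target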
diff --git a/promptsource/templates/turk/templates.yaml b/promptsource/templates/turk/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ba98c0e30aea5c247223f891d634993e4739e61e
--- /dev/null
+++ b/promptsource/templates/turk/templates.yaml
@@ -0,0 +1,118 @@
+dataset: turk
+templates:
+  58d1370e-3fc0-4b96-9e74-950b7c3edfd9: !Template
+    answer_choices: null
+    id: 58d1370e-3fc0-4b96-9e74-950b7c3edfd9
+    jinja: 'Simplify the sentence below.
+
+
+      {{original}}
+
+      |||
+
+      {{simplifications | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: simplification
+    reference: ''
+  da3fb228-7383-497f-936c-9bcbcd0d057b: !Template
+    answer_choices: null
+    id: da3fb228-7383-497f-936c-9bcbcd0d057b
+    jinja: 'Read the two texts below and answer the question.
+
+      {% if range(0,2) | choice %}
+
+      Text A: {{original}}
+
+
+      Text B: {{simplifications | choice }}
+
+
+      One of the texts above is more verbose than the other. Which one is the verbose
+      one?
+
+      |||
+
+      Text A
+
+      {% else %}
+
+      Text A: {{simplifications | choice }}
+
+
+      Text B: {{original}}
+
+
+      One of the texts above is more verbose than the other. Which one is the verbose
+      one?
+
+      |||
+
+      Text B
+
+      {% endif %} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: choose-verbose
+    reference: ''
+  dc853532-e948-443b-bae1-3ebb968bc7c5: !Template
+    answer_choices: null
+    id: dc853532-e948-443b-bae1-3ebb968bc7c5
+    jinja: 'Make the following sentence more verbose.
+
+
+      {{simplifications | choice}}
+
+      |||
+
+      {{original}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: make-verbose
+    reference: ''
+  ec70cd03-8282-4e45-957f-927c60254ad4: !Template
+    answer_choices: null
+    id: ec70cd03-8282-4e45-957f-927c60254ad4
+    jinja: '{% if range(0,2) | choice %}
+
+      Text A: {{original}}
+
+
+      Text B: {{simplifications | choice }}
+
+
+      One of the texts above is a simplification of the other. Which one is the simplified
+      one?
+
+      |||
+
+      Text B
+
+      {% else %}
+
+      Text A: {{simplifications | choice }}
+
+
+      Text B: {{original}}
+
+
+      One of the texts above is a simplification of the other. Which one is the simplified
+      one?
+
+      |||
+
+      Text A
+
+      {% endif %} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: choose-simplification
+    reference: ''
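The two `choose-*` templates use `{% if range(0,2) | choice %}` as a coin flip to randomize which text appears as Text A, so the answer never correlates with position. The same flip in Python:

import random

if random.choice(range(0, 2)):  # {% if range(0,2) | choice %}
    texts, verbose = {"Text A": "original", "Text B": "simplification"}, "Text A"
else:
    texts, verbose = {"Text A": "simplification", "Text B": "original"}, "Text B"
print(texts, "-> the verbose one is", verbose)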
diff --git a/promptsource/templates/tweet_eval/emoji/templates.yaml b/promptsource/templates/tweet_eval/emoji/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2ac3d739755aa41436940998ac1b5a8e0acd7d3f
--- /dev/null
+++ b/promptsource/templates/tweet_eval/emoji/templates.yaml
@@ -0,0 +1,51 @@
+dataset: tweet_eval
+subset: emoji
+templates:
+  8c794abe-5364-430f-aa1e-eb3501443cec: !Template
+    answer_choices: null
+    id: 8c794abe-5364-430f-aa1e-eb3501443cec
+    jinja: "{% set emo = [\n  \"\u2764\",\n  \"\U0001F60D\",\n  \"\U0001F602\",\n\
+      \  \"\U0001F495\",\n  \"\U0001F525\",\n  \"\U0001F60A\",\n  \"\U0001F60E\",\n\
+      \  \"\u2728\",\n  \"\U0001F499\",\n  \"\U0001F618\",\n  \"\U0001F4F7\",\n  \"\
+      \U0001F1FA\U0001F1F8\",\n  \"\u2600\",\n  \"\U0001F49C\",\n  \"\U0001F609\"\
+      ,\n  \"\U0001F4AF\",\n  \"\U0001F601\",\n  \"\U0001F384\",\n  \"\U0001F4F8\"\
+      ,\n  \"\U0001F61C\"] %}\n\nWhich emoji among {{emo | join(\", \")}} best describes\
+      \ the sentiment of the following tweet?\n\n{{text}} |||\n{{emo[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: emoji_option
+    reference: ''
+  c05f50e0-f708-44bc-98e7-ff7b3f9f5d93: !Template
+    answer_choices: null
+    id: c05f50e0-f708-44bc-98e7-ff7b3f9f5d93
+    jinja: "{% set emo = [\n  \"\u2764\",\n  \"\U0001F60D\",\n  \"\U0001F602\",\n\
+      \  \"\U0001F495\",\n  \"\U0001F525\",\n  \"\U0001F60A\",\n  \"\U0001F60E\",\n\
+      \  \"\u2728\",\n  \"\U0001F499\",\n  \"\U0001F618\",\n  \"\U0001F4F7\",\n  \"\
+      \U0001F1FA\U0001F1F8\",\n  \"\u2600\",\n  \"\U0001F49C\",\n  \"\U0001F609\"\
+      ,\n  \"\U0001F4AF\",\n  \"\U0001F601\",\n  \"\U0001F384\",\n  \"\U0001F4F8\"\
+      ,\n  \"\U0001F61C\"] %}\n\nWhich emoji among {{emo}} would be the best comment\
+      \ to the following tweet?\n\n{{text}} |||\n{{emo[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: emoji_reply
+    reference: ''
+  d5c771d3-28e7-420e-af47-c077cfe0e7e5: !Template
+    answer_choices: null
+    id: d5c771d3-28e7-420e-af47-c077cfe0e7e5
+    jinja: "Which emoji best describes the sentiment of the following tweet?\n\n{{text}}\
+      \ |||\n{{\n[\n  \"\u2764\",\n  \"\U0001F60D\",\n  \"\U0001F602\",\n  \"\U0001F495\
+      \",\n  \"\U0001F525\",\n  \"\U0001F60A\",\n  \"\U0001F60E\",\n  \"\u2728\",\n\
+      \  \"\U0001F499\",\n  \"\U0001F618\",\n  \"\U0001F4F7\",\n  \"\U0001F1FA\U0001F1F8\
+      \",\n  \"\u2600\",\n  \"\U0001F49C\",\n  \"\U0001F609\",\n  \"\U0001F4AF\",\n\
+      \  \"\U0001F601\",\n  \"\U0001F384\",\n  \"\U0001F4F8\",\n  \"\U0001F61C\"\n\
+      ][label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: emoji
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/emotion/templates.yaml b/promptsource/templates/tweet_eval/emotion/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a49c21c369aab3173679cbc45d09d5f8d3ea3eda
--- /dev/null
+++ b/promptsource/templates/tweet_eval/emotion/templates.yaml
@@ -0,0 +1,39 @@
+dataset: tweet_eval
+subset: emotion
+templates:
+  87db02f2-585e-4fd1-81c0-e94297607097: !Template
+    answer_choices: null
+    id: 87db02f2-585e-4fd1-81c0-e94297607097
+    jinja: "\n{% set li=[\n  \"anger\",\n  \"joy\",\n  \"optimism\",\n  \"sadness\"\
+      \n] %}\nWhich emotion among {{li | join(\", \")}} best describes the feeling\
+      \ of the author of the following tweet?\n\n{{text}}|||\n{{\n li[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: author_emotion
+    reference: ''
+  8bc3ebc5-77f1-4d55-bd96-c62429ebf093: !Template
+    answer_choices: null
+    id: 8bc3ebc5-77f1-4d55-bd96-c62429ebf093
+    jinja: "Which emotion is best represented by the following tweet?\n{{text}}|||\n\
+      {{\n[\n  \"anger\",\n  \"joy\",\n  \"optimism\",\n  \"sadness\"\n] [label]\n\
+      }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: emotion
+    reference: ''
+  a5992077-2e31-467b-a6ee-b75dee933d0e: !Template
+    answer_choices: null
+    id: a5992077-2e31-467b-a6ee-b75dee933d0e
+    jinja: "\n{% set li=[\n  \"anger\",\n  \"joy\",\n  \"optimism\",\n  \"sadness\"\
+      \n] %}\nWhich emotion among {{li| join(\", \")}} is best represented by the\
+      \ following tweet?\n{{text}}|||\n{{\n li[label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: emotion_option
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/hate/templates.yaml b/promptsource/templates/tweet_eval/hate/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f838f4534e861dd4e01ec1f4af39e03c3c53973e
--- /dev/null
+++ b/promptsource/templates/tweet_eval/hate/templates.yaml
@@ -0,0 +1,48 @@
+dataset: tweet_eval
+subset: hate
+templates:
+  3266f9d4-9c80-4e17-a8a6-1fe44ce8f3bf: !Template
+    answer_choices: no ||| yes
+    id: 3266f9d4-9c80-4e17-a8a6-1fe44ce8f3bf
+    jinja: 'Does this tweet convey the author''s hatred towards something or someone?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: author_hate
+    reference: ''
+  34a5128b-6fc8-453b-94d4-4ebaa87172c1: !Template
+    answer_choices: no ||| yes
+    id: 34a5128b-6fc8-453b-94d4-4ebaa87172c1
+    jinja: 'Does this tweet convey hate?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hate or not
+    reference: ''
+  971ad470-85bf-484e-aab7-b942f817bf2c: !Template
+    answer_choices: non-hate ||| hate
+    id: 971ad470-85bf-484e-aab7-b942f817bf2c
+    jinja: 'Does this tweet convey {{"hate"}} or {{"non-hate"}}?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hate_option
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/irony/templates.yaml b/promptsource/templates/tweet_eval/irony/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d4eeca5e48ca79f64f4c7c0a3e7288dc8181ac78
--- /dev/null
+++ b/promptsource/templates/tweet_eval/irony/templates.yaml
@@ -0,0 +1,33 @@
+dataset: tweet_eval
+subset: irony
+templates:
+  cd2ed852-c6fa-431a-b0f1-06f0240d74a0: !Template
+    answer_choices: no ||| yes
+    id: cd2ed852-c6fa-431a-b0f1-06f0240d74a0
+    jinja: 'Does this tweet contain irony?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: irony
+    reference: ''
+  e7cff075-9388-48de-af51-fe185b654217: !Template
+    answer_choices: non-irony ||| irony
+    id: e7cff075-9388-48de-af51-fe185b654217
+    jinja: 'Does this tweet contain {{"irony"}} or {{"non-irony"}}?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: irony_option
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/offensive/templates.yaml b/promptsource/templates/tweet_eval/offensive/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed90ba2bdc93e9da0eae01118f0efcb4d42cbc91
--- /dev/null
+++ b/promptsource/templates/tweet_eval/offensive/templates.yaml
@@ -0,0 +1,33 @@
+dataset: tweet_eval
+subset: offensive
+templates:
+  10850707-80f3-4a75-b9f4-1e2d12be04c0: !Template
+    answer_choices: no ||| yes
+    id: 10850707-80f3-4a75-b9f4-1e2d12be04c0
+    jinja: 'Is this tweet {{"offensive"}}?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: offensive
+    reference: ''
+  332351e2-d653-427e-a25b-4d4c3d9d0f4a: !Template
+    answer_choices: no ||| yes
+    id: 332351e2-d653-427e-a25b-4d4c3d9d0f4a
+    jinja: 'Could the following tweet be taken down for {{"offensive"}} content?
+
+
+      {{text}} |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: take_down_offensive
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/sentiment/templates.yaml b/promptsource/templates/tweet_eval/sentiment/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..536567018a419feffcd822a44042982bbbbf2a5f
--- /dev/null
+++ b/promptsource/templates/tweet_eval/sentiment/templates.yaml
@@ -0,0 +1,26 @@
+dataset: tweet_eval
+subset: sentiment
+templates:
+  6702e8cd-9764-4c88-86a9-046f84c98ef2: !Template
+    answer_choices: null
+    id: 6702e8cd-9764-4c88-86a9-046f84c98ef2
+    jinja: "What sentiment does this tweet convey?\n\n{{text}} |||\n{{[\n  \"negative\"\
+      ,\n  \"neutral\",\n  \"positive\"\n][label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentiment
+    reference: ''
+  6c6c797c-9912-4778-906b-16f465941d16: !Template
+    answer_choices: null
+    id: 6c6c797c-9912-4778-906b-16f465941d16
+    jinja: "What sentiment among {{[\n  \"negative\",\n  \"neutral\",\n  \"positive\"\
+      \n]}} does this tweet convey?\n\n{{text}} |||\n{{[\n  \"negative\",\n  \"neutral\"\
+      ,\n  \"positive\"\n][label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: sentiment_option
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_abortion/templates.yaml b/promptsource/templates/tweet_eval/stance_abortion/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..06de36bcdd4c59e389afe146b0605a0205dae5ea
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_abortion/templates.yaml
@@ -0,0 +1,26 @@
+dataset: tweet_eval
+subset: stance_abortion
+templates:
+  615151f8-ac5b-4c0e-a234-9e9b6296a2f2: !Template
+    answer_choices: null
+    id: 615151f8-ac5b-4c0e-a234-9e9b6296a2f2
+    jinja: "What option among, {{\"none\"}}, {{\"against\"}}, {{\"favor\"}}, best\
+      \ describes the stance of this tweet regarding abortion?\n\n{{text}} |||\n{{\n\
+      [\n  \"none\",\n  \"against\",\n  \"favor\"\n]\n[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: abortion_option
+    reference: ''
+  687ffa1e-a772-48b1-9291-ba4e530a909e: !Template
+    answer_choices: null
+    id: 687ffa1e-a772-48b1-9291-ba4e530a909e
+    jinja: "Is this tweet in favor of or against abortion?\n\n{{text}} |||\n{{\n[\n\
+      \  \"none\",\n  \"against\",\n  \"favor\"\n]\n[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: abortion
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_atheism/templates.yaml b/promptsource/templates/tweet_eval/stance_atheism/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c53a5e6175ce141c2de2d79e2ea7d29d53240924
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_atheism/templates.yaml
@@ -0,0 +1,26 @@
+dataset: tweet_eval
+subset: stance_atheism
+templates:
+  11c82916-b422-4d42-99b5-bee028a73843: !Template
+    answer_choices: null
+    id: 11c82916-b422-4d42-99b5-bee028a73843
+    jinja: "Is this tweet in favor of or against atheism?\n\n{{text}} |||\n{{\n[\n\
+      \  \"none\",\n  \"against\",\n  \"favor\"\n] [label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: atheism
+    reference: ''
+  39195494-2d37-4684-b30a-b406bfd9f5b3: !Template
+    answer_choices: null
+    id: 39195494-2d37-4684-b30a-b406bfd9f5b3
+    jinja: "Which option among, {{\"none\"}}, {{\"against\"}}, {{\"favor\"}}, conveys\
+      \ the stance of this tweet towards atheism?\n\n{{text}} |||\n{{\n[\n  \"none\"\
+      ,\n  \"against\",\n  \"favor\"\n] [label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: atheism_option
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_climate/templates.yaml b/promptsource/templates/tweet_eval/stance_climate/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e8410e887a34f8572b51777041aa6f84ea886756
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_climate/templates.yaml
@@ -0,0 +1,26 @@
+dataset: tweet_eval
+subset: stance_climate
+templates:
+  3ce9671c-5c83-4b1f-9a5a-aa967e66208c: !Template
+    answer_choices: null
+    id: 3ce9671c-5c83-4b1f-9a5a-aa967e66208c
+    jinja: "Is this tweet in favor of or against the effects of climate change?\n\n\
+      {{text}} |||\n{{\n[\n  \"none\",\n  \"against\",\n  \"favor\"\n][label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: climate
+    reference: ''
+  9acd132c-8261-463c-80e6-86b9f194cde5: !Template
+    answer_choices: null
+    id: 9acd132c-8261-463c-80e6-86b9f194cde5
+    jinja: "Which option among, {{\"none\"}}, {{\"against\"}}, {{\"favor\"}}, reflect\
+      \ the stance of this tweet towards the effects of climate change?\n\n{{text}}\
+      \ |||\n{{\n[\n  \"none\",\n  \"against\",\n  \"favor\"\n][label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: climate_option
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_feminist/templates.yaml b/promptsource/templates/tweet_eval/stance_feminist/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c55b53bca80c011d588d0ca2d8802cadb307985
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_feminist/templates.yaml
@@ -0,0 +1,26 @@
+dataset: tweet_eval
+subset: stance_feminist
+templates:
+  4797c407-28f2-4490-ad95-0d5451f68e2d: !Template
+    answer_choices: null
+    id: 4797c407-28f2-4490-ad95-0d5451f68e2d
+    jinja: "Which option among, {{\"none\"}}, {{\"against\"}}, {{\"favor\"}}, reflects\
+      \ the stance of this tweet towards feminism?\n\n{{text}} |||\n{{\n[\n  \"none\"\
+      ,\n  \"against\",\n  \"favor\"\n] [label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: feminist_option
+    reference: ''
+  95d81282-256b-44c9-b168-d279c625f5b4: !Template
+    answer_choices: null
+    id: 95d81282-256b-44c9-b168-d279c625f5b4
+    jinja: "Is this tweet in favor of or against feminism?\n\n{{text}} |||\n{{\n[\n\
+      \  \"none\",\n  \"against\",\n  \"favor\"\n] [label]\n}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: feminism
+    reference: ''
diff --git a/promptsource/templates/tweet_eval/stance_hillary/templates.yaml b/promptsource/templates/tweet_eval/stance_hillary/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec6cc000f7e79cc484028a82d1a8e4bca3248535
--- /dev/null
+++ b/promptsource/templates/tweet_eval/stance_hillary/templates.yaml
@@ -0,0 +1,26 @@
+dataset: tweet_eval
+subset: stance_hillary
+templates:
+  0910936e-dc67-49af-b320-36a56d56f5a5: !Template
+    answer_choices: null
+    id: 0910936e-dc67-49af-b320-36a56d56f5a5
+    jinja: "Which option among, {{\"none\"}}, {{\"against\"}}, {{\"favor\"}}, reflects\
+      \ the stance of this tweet towards  Hilary?\n\n{{text}} |||\n{{\n[\n  \"none\"\
+      ,\n  \"against\",\n  \"favor\"\n][label]\n}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hillary_option
+    reference: ''
+  ae0c9f9c-730c-4e9c-bf00-47d06b47ccee: !Template
+    answer_choices: null
+    id: ae0c9f9c-730c-4e9c-bf00-47d06b47ccee
+    jinja: "Is this tweet in support of Hilary?\n\n{{text}} |||\n{{\n[\n  \"none\"\
+      ,\n  \"against\",\n  \"favor\"\n][label]\n}}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: hillary
+    reference: ''
diff --git a/promptsource/templates/tydiqa/primary_task/templates.yaml b/promptsource/templates/tydiqa/primary_task/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d60cc2a864f8176ac0d820d09f2c0603f4cd94ed
--- /dev/null
+++ b/promptsource/templates/tydiqa/primary_task/templates.yaml
@@ -0,0 +1,156 @@
+dataset: tydiqa
+subset: primary_task
+templates:
+  16f11e56-a78d-4e33-bba1-586f9947baf7: !Template
+    answer_choices: Yes ||| No ||| None
+    id: 16f11e56-a78d-4e33-bba1-586f9947baf7
+    jinja: '{% if language == "english" %}
+
+      I wonder {{question_text}}.
+
+      Help me answer this question with "{{answer_choices[0]}}" or "{{answer_choices[1]}}"
+      or "{{answer_choices[2]}}" if none of the first two answers apply.
+
+      Here''s what I found on the internet:
+
+      Topic: {{document_title}}
+
+      Article: {{document_plaintext}}
+
+      |||
+
+      {{annotations.yes_no_answer[0] | capitalize}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: heres_what_I_found
+    reference: ''
+  297fc59f-bd92-493b-ae61-3c3adcb46eb3: !Template
+    answer_choices: Yes ||| No ||| None
+    id: 297fc59f-bd92-493b-ae61-3c3adcb46eb3
+    jinja: "{% if language == \"english\" %} \nQuestion: {{question_text}}\nAnswer\
+      \ the question with {{\"Yes\"}} or {{\"No\"}}. If it is not possible then answer\
+      \ {{\"None\"}}.\nHint: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0]\
+      \ | capitalize}}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: yes_no_none
+    reference: ''
+  4b21e3be-fba4-49b7-beb1-a61de26eb0ac: !Template
+    answer_choices: Yes ||| No
+    id: 4b21e3be-fba4-49b7-beb1-a61de26eb0ac
+    jinja: "{% if language == \"english\" %} \n    {% if annotations.yes_no_answer[0]\
+      \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question\
+      \ about {{document_title}}. {{question_text}}\n||| \n{{annotations.yes_no_answer[0]\
+      \ | capitalize}} \n    {% endif %} \n{% endif %} "
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: open_domain_qa_without_choices
+    reference: Answer Yes/No question
+  6835dd64-96bd-4bf8-9ba5-645d6a7b8472: !Template
+    answer_choices: Yes ||| No
+    id: 6835dd64-96bd-4bf8-9ba5-645d6a7b8472
+    jinja: '{% if language == "english" %}
+
+      {{question_text}}
+
+      Is this a "Yes/No" question?
+
+      |||
+
+      {% if annotations.yes_no_answer[0] == "NONE" %}
+
+      No
+
+      {% else %}
+
+      Yes
+
+      {% endif %}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: yes_no_question
+    reference: Ask if this is a yes no question
+  7b8b7707-dbad-40d2-a5c2-430e6ace10bb: !Template
+    answer_choices: Yes ||| No ||| None
+    id: 7b8b7707-dbad-40d2-a5c2-430e6ace10bb
+    jinja: '{% if language == "english" %}
+
+      Answer the following question with "{{answer_choices[0]}}" or "{{answer_choices[1]}}"
+      or "{{answer_choices[2]}}" if none of the first two answers apply.
+
+      Question: {{question_text}}
+
+      Topic: {{document_title}}
+
+      Article: {{document_plaintext}}
+
+      |||
+
+      {{annotations.yes_no_answer[0] | capitalize}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: read_and_answer
+    reference: ''
+  9c42e3fd-d46e-4149-bb60-4b3118104d95: !Template
+    answer_choices: Yes ||| No
+    id: 9c42e3fd-d46e-4149-bb60-4b3118104d95
+    jinja: "{% if language == \"english\" %} \n    {% if annotations.yes_no_answer[0]\
+      \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAfter reading the\
+      \ following text snippet from Wikipedia, please answer the question: {{question_text}}\
+      \ \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n\
+      \    {% endif %}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: after_reading_the_text
+    reference: Reading Comprehension
+  b4f7c441-41b1-4665-93f9-f2e875aed92a: !Template
+    answer_choices: Yes ||| No
+    id: b4f7c441-41b1-4665-93f9-f2e875aed92a
+    jinja: "{% if language == \"english\" %} \n    {% if annotations.yes_no_answer[0]\
+      \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question\
+      \ about {{document_title}}.\nQuestion: {{question_text}}. Yes or No?\n||| \n\
+      {{annotations.yes_no_answer[0] | capitalize}}\n    {% endif %}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: open_domain_qa
+    reference: Ask Yes/No question
+  e593017f-9bcf-4442-944d-fcdf2edcb4f7: !Template
+    answer_choices: Yes ||| No
+    id: e593017f-9bcf-4442-944d-fcdf2edcb4f7
+    jinja: "{% if language == \"english\" %} \n    {% if annotations.yes_no_answer[0]\
+      \ == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nBased on the following\
+      \ text snippet, {{question_text}} \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0]\
+      \ | capitalize}}\n    {% endif %}\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: true
+    name: based_on_the_text
+    reference: Binary question without mentioning KB
diff --git a/promptsource/templates/tydiqa/secondary_task/templates.yaml b/promptsource/templates/tydiqa/secondary_task/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bcdaf87a5787d2224dd555869bd08d12bf6bea2e
--- /dev/null
+++ b/promptsource/templates/tydiqa/secondary_task/templates.yaml
@@ -0,0 +1,216 @@
+dataset: tydiqa
+subset: secondary_task
+templates:
+  047ed162-f58b-42d5-81aa-0a17a9750230: !Template
+    answer_choices: null
+    id: 047ed162-f58b-42d5-81aa-0a17a9750230
+    jinja: "{% set lang = id.split('-')[0] %}\n{% if lang == \"english\" %}\nSnippet:\
+      \ {{context}}\nI know that the answer to \"{{question}}\" appears somewhere\
+      \ in the text snippet about {{title}}. Can you answer the question?\n|||\n{{answers.text\
+      \ | choice}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: can_you_answer_the_question
+    reference: Ask question with Context
+  05c99237-0e03-4ec9-95f3-dfde8ae25605: !Template
+    answer_choices: null
+    id: 05c99237-0e03-4ec9-95f3-dfde8ae25605
+    jinja: '{% set _blank2 = ["title", "topic"] %}
+
+      {% set _blank1 = ["guess", "generate", "determine"] %}
+
+      {% set _blank=["passage", "text", "text snippet", "info"]|random %}
+
+      {% set _position = ["above", "following"] |random %}
+
+      {% set lang = id.split(''-'')[0] %}
+
+      {% if lang == "english" %}
+
+      {% if  _position == "above" %}
+
+      {{context}}{{"\n"}}
+
+      {% endif %}
+
+      Can you {{_blank1|random}} the {{_blank2|random}} of the {{_position}} passage?
+
+      {% if  _position == "following" %}
+
+      {{"\n"}}{{context}}
+
+      {% endif %}
+
+      |||
+
+      {{title}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: title_generation
+    reference: Generate title from a passage
+  1f4728ba-b25e-450e-975f-6dc8c0cb4bb1: !Template
+    answer_choices: null
+    id: 1f4728ba-b25e-450e-975f-6dc8c0cb4bb1
+    jinja: '{% set lang = id.split(''-'')[0] %}
+
+      {% if lang == "english" %}
+
+      Could you generate a question whose answer is {{answers.text | choice}} based
+      on the following context: {{context}}
+
+      |||
+
+      {{question}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: end_to_end_question_generation
+    reference: generate end-to-end question from a passage.
+  3d7ee9fe-ac53-4cf3-9913-431425225a5c: !Template
+    answer_choices: null
+    id: 3d7ee9fe-ac53-4cf3-9913-431425225a5c
+    jinja: '{% set lang = id.split(''-'')[0] %}
+
+      {% if lang == "english" %}
+
+      I am trying to figure out the answer to the question "{{question}}"
+
+      I found this text about {{title}} on Wikipedia and I think it contains the answer.
+      Can you tell me the answer?
+
+      Text: {{context}}
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: can_you_tell_me_the_answer
+    reference: Ask question with Context
+  696c888c-3419-4e4c-b559-1d9772fa60ab: !Template
+    answer_choices: null
+    id: 696c888c-3419-4e4c-b559-1d9772fa60ab
+    jinja: "{% set lang = id.split('-')[0] %}\n{% if lang == \"english\" %}\nExtract\
+      \ from the passage the answer to the question: {{question}}\nPassage about {{title}}:\
+      \ {{context}}\n|||\n{{answers.text | choice}} \n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: extract_answer
+    reference: Ask question with Context
+  c2356ac6-7761-43b8-9fb9-38ed25c0db9b: !Template
+    answer_choices: null
+    id: c2356ac6-7761-43b8-9fb9-38ed25c0db9b
+    jinja: '{% set lang = id.split("-")[0] %}
+
+      {% if lang == "english" %}
+
+      I am testing my students'' knowledge about {{title}}.
+
+      Based on the context ({{context}}), here''s the question to answer: {{question}}.
+      The answer is in the context.
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: testing_students
+    reference: ''
+  d0966d12-6d15-4138-b273-5fe2e1619ff9: !Template
+    answer_choices: null
+    id: d0966d12-6d15-4138-b273-5fe2e1619ff9
+    jinja: '{% set lang = id.split(''-'')[0] %}
+
+      {% if lang == "english" %}
+
+      Could you generate a question about {{title}} whose answer is {{answers.text
+      | choice}} based on the following context: {{context}}
+
+      |||
+
+      {{question}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: end_to_end_question_generation_with_title
+    reference: generate end-to-end question from a passage with a title
+  f276815f-f7c0-4dab-b12e-08e76da6d760: !Template
+    answer_choices: null
+    id: f276815f-f7c0-4dab-b12e-08e76da6d760
+    jinja: '{% set lang = id.split(''-'')[0] %}
+
+      {% if lang == "english" %}
+
+      {{question}}
+
+      Answer the question above.
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: false
+    name: simple_question_odqa
+    reference: Ask question without Context
+  fcf0920f-5599-44a6-bf2a-9ef6bbbe1e64: !Template
+    answer_choices: null
+    id: fcf0920f-5599-44a6-bf2a-9ef6bbbe1e64
+    jinja: '{% set lang = id.split(''-'')[0] %}
+
+      {% if lang == "english" %}
+
+      I''ve always wondered: {{question}}
+
+      I searched Wikipedia and I found the following text snippet about {{title}}.
+
+      Snippet: {{context}}
+
+      What''s the answer?
+
+      |||
+
+      {{answers.text | choice}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: whats_the_answer
+    reference: Ask question with Context
diff --git a/promptsource/templates/web_questions/templates.yaml b/promptsource/templates/web_questions/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc819077a003cbc65106bbc17499ac370b0b50fc
--- /dev/null
+++ b/promptsource/templates/web_questions/templates.yaml
@@ -0,0 +1,59 @@
+dataset: web_questions
+templates:
+  427785bc-a8f3-4c86-bd43-e54447a58615: !Template
+    answer_choices: null
+    id: 427785bc-a8f3-4c86-bd43-e54447a58615
+    jinja: 'Give me the correct facts to answer this: {{question}} ||| {{answers |
+      choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: get_the_answer
+    reference: ''
+  9f4cd4a4-79e5-40b2-bb0d-f9a86396511a: !Template
+    answer_choices: null
+    id: 9f4cd4a4-79e5-40b2-bb0d-f9a86396511a
+    jinja: Give me a possible correct answer to the question "{{ question }}" |||
+      {{ answers | choice }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: potential-correct-answer
+    reference: ''
+  bfed45a7-b36c-440b-8c94-f117cc6c9f34: !Template
+    answer_choices: null
+    id: bfed45a7-b36c-440b-8c94-f117cc6c9f34
+    jinja: 'What''s the answer to this question: {{question}} ||| {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: whats_the_answer
+    reference: ''
+  df08956c-035b-4216-af1c-61250617faa4: !Template
+    answer_choices: null
+    id: df08956c-035b-4216-af1c-61250617faa4
+    jinja: 'Short general knowledge question: {{question}} ||| {{answers | choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: short_general_knowledge_q
+    reference: ''
+  e5c72a6b-8ab4-4219-9f41-debf7224884c: !Template
+    answer_choices: null
+    id: e5c72a6b-8ab4-4219-9f41-debf7224884c
+    jinja: '{{ question|capitalize }} ||| {{ answers | choice }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Squad
+      original_task: true
+    name: question-answer
+    reference: ''
diff --git a/promptsource/templates/wiki_bio/templates.yaml b/promptsource/templates/wiki_bio/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..149d2160f6a11b04e8742c723f5e63f752b7f2c8
--- /dev/null
+++ b/promptsource/templates/wiki_bio/templates.yaml
@@ -0,0 +1,112 @@
+dataset: wiki_bio
+templates:
+  0e240546-0d3c-4049-9cc7-32039a6e50ff: !Template
+    answer_choices: null
+    id: 0e240546-0d3c-4049-9cc7-32039a6e50ff
+    jinja: 'Facts:
+
+      {% for n in range (input_text["table"]["column_header"]|length) %}
+
+      {% if input_text["table"]["column_header"][n] != "article_title" %}
+
+      - {{input_text["table"]["column_header"][n].replace("_"," ") }}: {{input_text["table"]["content"][n]
+      }}
+
+      {% endif %}
+
+      {% endfor %}
+
+      Based on these bullet points, write a short biography describing the life of
+      {{input_text["context"]}}. |||
+
+      {{target_text}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: true
+    name: who
+    reference: ''
+  53694802-680c-4584-8b07-686f56c45278: !Template
+    answer_choices: null
+    id: 53694802-680c-4584-8b07-686f56c45278
+    jinja: "Read the bio below and try to give details on {{input_text[\"context\"\
+      ]}}'s: \n{% for n in range (input_text[\"table\"][\"column_header\"]|length)\
+      \ %} {% if input_text[\"table\"][\"column_header\"][n] != \"article_title\"\
+      \ %}\n- {{ input_text[\"table\"][\"column_header\"][n].replace(\"_\",\" \")\
+      \ }} \n{% endif %} {% endfor %}\n\nBio: {{target_text}} |||\n{% for n in range\
+      \ (input_text[\"table\"][\"column_header\"]|length) %}\n{% if input_text[\"\
+      table\"][\"column_header\"][n] != \"article_title\" %}\n- {{ input_text[\"table\"\
+      ][\"column_header\"][n].replace(\"_\",\" \") }} is {{ input_text[\"table\"][\"\
+      content\"][n] }}\n{% endif %}\n{% endfor %}\n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: comprehension
+    reference: ''
+  788c1adf-bc8e-40ed-a81f-ac6f0dfcb471: !Template
+    answer_choices: null
+    id: 788c1adf-bc8e-40ed-a81f-ac6f0dfcb471
+    jinja: "What type of details about {{input_text[\"context\"]}} can be gathered\
+      \ from the following bio?\n\nBio: {{target_text}} |||\n{% for n in range (input_text[\"\
+      table\"][\"column_header\"]|length) %}\n{% if input_text[\"table\"][\"column_header\"\
+      ][n] != \"article_title\" %}\n- {{ input_text[\"table\"][\"column_header\"][n].replace(\"\
+      _\",\" \") }} \n{% endif %}\n{% endfor %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: what_content
+    reference: ''
+  a954e5bb-c763-4d8e-82a8-7e96cfce8b78: !Template
+    answer_choices: null
+    id: a954e5bb-c763-4d8e-82a8-7e96cfce8b78
+    jinja: '{% for n in range (input_text["table"]["column_header"]|length) %}
+
+      {% if input_text["table"]["column_header"][n] != "article_title" and input_text["table"]["column_header"][n]
+      !="name" %}
+
+      - {{ input_text["table"]["column_header"][n].replace("_"," ") }} is {{ input_text["table"]["content"][n]
+      }}
+
+      {% endif %}
+
+      {% endfor %}
+
+
+      Given the details above, guess who this information could be about. |||
+
+      {{input_text["context"]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: guess_person
+    reference: ''
+  d14f4527-cd06-484b-af25-ba9082bcda38: !Template
+    answer_choices: null
+    id: d14f4527-cd06-484b-af25-ba9082bcda38
+    jinja: 'What key details about {{input_text["context"]}} can be extracted from
+      the following bio?
+
+
+      Bio: {{target_text}} |||
+
+      {% for n in range (input_text["table"]["column_header"]|length) %}
+
+      {% if input_text["table"]["column_header"][n] != "article_title" %}
+
+      - {{ input_text["table"]["column_header"][n].replace("_"," ") }} is {{ input_text["table"]["content"][n]
+      }}
+
+      {% endif %}
+
+      {% endfor %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: key_content
+    reference: ''
diff --git a/promptsource/templates/wiki_hop/masked/templates.yaml b/promptsource/templates/wiki_hop/masked/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cb72cad4cc5d75c754d9f15b9ba5f6b3bcbc30c
--- /dev/null
+++ b/promptsource/templates/wiki_hop/masked/templates.yaml
@@ -0,0 +1,164 @@
+dataset: wiki_hop
+subset: masked
+templates:
+  08f2d1cf-c026-4b65-96d0-a28ff91affb5: !Template
+    answer_choices: null
+    id: 08f2d1cf-c026-4b65-96d0-a28ff91affb5
+    jinja: '{% set question_split = question.split('' '') %}
+
+      {% if question_split[0]=="place_of_birth" %}
+
+      Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      Where was {{ question_split[1:] | join(" ")}} born? Choose from the following:
+
+      - {{ candidates | join("\n- ") }}
+
+      {% elif question_split[0]=="country_of_citizenship" %}
+
+      Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      What country is {{ question_split[1:] | join(" ")}} a citizen of? Choose from
+      the following:
+
+      - {{ candidates | join("\n- ") }}
+
+      {% elif question_split[0]=="place_of_death" %}
+
+      Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      Where did {{ question_split[1:] | join(" ")}} die? Choose from the following:
+
+      - {{ candidates | join("\n- ") }}
+
+      {% endif %}
+
+      |||
+
+      {% if question_split[0] in ["place_of_birth", "country_of_citizenship", "place_of_death"]
+      %}
+
+      {{answer}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Indirect Question about Birthplace / Citizenship / Place of Death
+    reference: Ask about place of birth, citizenship, or place of death for the subject
+      entity.
+  1fb4f0d9-9533-49ab-8ac9-53cd42849584: !Template
+    answer_choices: null
+    id: 1fb4f0d9-9533-49ab-8ac9-53cd42849584
+    jinja: 'Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      {% set question_split = question.split('' '') %}
+
+      What is the relationship between "{{ question_split[1:] | join(" ")}}" and "{{answer}}"?
+
+
+      |||
+
+      {{ question_split[0] | replace("_", " ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Explain Relation
+    reference: Given information, explain the relation between the subject entity
+      and the object entity in a fact triple.
+  3181f711-a376-4d6e-9fca-a34e1d048585: !Template
+    answer_choices: null
+    id: 3181f711-a376-4d6e-9fca-a34e1d048585
+    jinja: 'Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      {% set question_split = question.split('' '') %}
+
+      Question: ({{ question_split[1:] | join(" ")}}, {{ question_split[0] | replace("_",
+      " ") }}, ?)
+
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generate Object Answer
+    reference: Given information, generate the best object entity for the fact triple.
+  639fa83f-14fd-457a-886e-a65334cb7e66: !Template
+    answer_choices: null
+    id: 639fa83f-14fd-457a-886e-a65334cb7e66
+    jinja: "Information:\n- {{ supports | join(\"\\n- \") }}\n\n{% set question_split\
+      \ = question.split(' ') %}\nQuestion: ({{ question_split[1:] | join(\" \")}},\
+      \ {{ question_split[0] | replace(\"_\", \" \") }}, ?)\n\nCandidate Answers:\
+      \ \n- {{ candidates | join(\"\\n- \") }}\n|||\n{{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Choose Best Object Candidate
+    reference: Given information and possible object candidates, choose the best object
+      for the fact triple.
+  c543669d-d3aa-4eb2-9338-0fa7c37b6b90: !Template
+    answer_choices: null
+    id: c543669d-d3aa-4eb2-9338-0fa7c37b6b90
+    jinja: 'Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      {% set question_split = question.split('' '') %}
+
+      Generate a fact triple for the information above.
+
+
+      |||
+
+      ({{ question_split[1:] | join(" ") }}, {{ question_split[0] | replace("_", "
+      ") }}, {{answer}})'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generate Fact Triple
+    reference: Given information, generate a fact triple.
+  dc8f1874-f6d5-4dc4-a829-0899286021a0: !Template
+    answer_choices: null
+    id: dc8f1874-f6d5-4dc4-a829-0899286021a0
+    jinja: 'Information:
+
+      - {{ supports | join("\n- ") }}
+
+
+      {% set question_split = question.split('' '') %}
+
+      Question: (?, {{ question_split[0] | replace("_", " ") }}, {{answer}})
+
+
+      |||
+
+      {{ question_split[1:] | join(" ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Generate Subject Answer
+    reference: Given information, generate the best subject entity for the fact triple.
diff --git a/promptsource/templates/wiki_hop/original/templates.yaml b/promptsource/templates/wiki_hop/original/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f91d7070b031d7ec6695d2e3bff53417c6de03ca
--- /dev/null
+++ b/promptsource/templates/wiki_hop/original/templates.yaml
@@ -0,0 +1,273 @@
+dataset: wiki_hop
+subset: original
+templates:
+  0bb6b603-115e-4ae9-b17b-881fa72b2e81: !Template
+    answer_choices: '{{candidates | join("|||")}}'
+    id: 0bb6b603-115e-4ae9-b17b-881fa72b2e81
+    jinja: "Information:\n{% for support in supports %}\n- {{ support }}\n{% endfor\
+      \ %}\n\n{% set question_split = question.split(' ') %}\nWhat object entity has\
+      \ the relation of '{{ question_split[0] | replace(\"_\", \" \")}}' with the\
+      \ subject '{{ question_split[1:] | join(\" \")}}'? \n\nChoices:\n- {{answer_choices\
+      \ | join(\"\\n - \") }}\n\n|||\n{{answer}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_best_object_interrogative_1
+    reference: Given information and subject and relation, choose the best object
+      entity (interrogative instruction).
+  23e0d05a-8777-45c4-8692-13f3dc5a40bb: !Template
+    answer_choices: null
+    id: 23e0d05a-8777-45c4-8692-13f3dc5a40bb
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      What is the relationship between ''{{ question_split[1:] | join(" ")}}'' and
+      ''{{answer}}''?
+
+
+      |||
+
+      {{ question_split[0] | replace("_", " ") }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: explain_relation
+    reference: Given information, explain the relation between the subject entity
+      and the object entity.
+  2fadafea-f814-4ff1-a3aa-cace9067f31f: !Template
+    answer_choices: null
+    id: 2fadafea-f814-4ff1-a3aa-cace9067f31f
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      What entity does ''{{ question_split[1:] | join(" ")}}'' have the relation ''{{
+      question_split[0] | replace("_", " ") }}'' with?
+
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: generate_object
+    reference: Given information, generate the best object entity (without answer
+      choices in prompt).
+  40bdb0e7-def9-4829-9a37-a05d354ef7cd: !Template
+    answer_choices: null
+    id: 40bdb0e7-def9-4829-9a37-a05d354ef7cd
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      Given the paragraphs above, decide what entity has the relation ''{{ question_split[0]
+      | replace("_", " ") }}'' with ''{{answer}}''.
+
+
+      |||
+
+      {{ question_split[1:] | join(" ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: generate_subject
+    reference: Given information, generate the best subject entity for the fact triple.
+  4836e754-b2c9-4697-b386-6770494dc5f5: !Template
+    answer_choices: '{{candidates | join("|||")}}'
+    id: 4836e754-b2c9-4697-b386-6770494dc5f5
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      Given the information above, choose from the list below the object entity that
+      exhibits the relation ''{{ question_split[0] | replace("_", " ")}}'' with the
+      subject ''{{ question_split[1:] | join(" ")}}''.
+
+
+      Choices:
+
+      - {{answer_choices | join("\n - ") }}
+
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_best_object_affirmative_1
+    reference: Given information and subject and relation, choose the best object
+      entity (affirmative instruction).
+  4a1b61f6-c619-4d3d-aec2-f41a8986641c: !Template
+    answer_choices: '{{candidates | join("|||")}}'
+    id: 4a1b61f6-c619-4d3d-aec2-f41a8986641c
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      After reading the paragraphs above, we are interested in knowing the entity
+      with which ''{{ question_split[1:] | join(" ")}}'' exhibits the relationship
+      of ''{{ question_split[0] | replace("_", " ")}}''. Find the answer from the
+      choices below.
+
+
+      Choices:
+
+      - {{answer_choices | join("\n - ") }}
+
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_best_object_affirmative_3
+    reference: Given information and subject and relation, choose the best object
+      entity (affirmative instruction).
+  c4675106-0ac5-4bf0-a400-f628daae81db: !Template
+    answer_choices: null
+    id: c4675106-0ac5-4bf0-a400-f628daae81db
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      Given the information, choose the subject and object entities that have the
+      relation of ''{{ question_split[0] | replace("_", " ") }}''.
+
+
+      |||
+
+      {{ question_split[1:] | join(" ") }} , {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: false
+    name: generate_subject_and_object
+    reference: Given information and relation, generate the subject and object.
+  e4dc7abf-d56a-4217-ba7f-7470cd959e8e: !Template
+    answer_choices: '{{candidates | join("|||")}}'
+    id: e4dc7abf-d56a-4217-ba7f-7470cd959e8e
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      After reading the paragraphs above, choose the best answer for the entity that is
+      related to ''{{ question_split[1:] | join(" ")}}'' with the relationship of
+      ''{{ question_split[0] | replace("_", " ")}}''.
+
+
+      Choices:
+
+      - {{answer_choices | join("\n - ") }}
+
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_best_object_affirmative_2
+    reference: Given information and subject and relation, choose the best object
+      entity (affirmative instruction).
+  f44936e1-cbde-4d41-b462-6150cce8c1c8: !Template
+    answer_choices: '{{candidates | join("|||")}}'
+    id: f44936e1-cbde-4d41-b462-6150cce8c1c8
+    jinja: 'Information:
+
+      {% for support in supports %}
+
+      - {{ support }}
+
+      {% endfor %}
+
+
+      {% set question_split = question.split('' '') %}
+
+      ''{{ question_split[1:] | join(" ")}}'' is related to which object entity through
+      the relation of ''{{ question_split[0] | replace("_", " ")}}''?
+
+
+      Choices:
+
+      - {{answer_choices | join("\n - ") }}
+
+
+      |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choose_best_object_interrogative_2
+    reference: Given information and subject and relation, choose the best object
+      entity (interrogative instruction).
diff --git a/promptsource/templates/wiki_qa/templates.yaml b/promptsource/templates/wiki_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0280936a1887950fb8af276d6e4e1df6c54d4393
--- /dev/null
+++ b/promptsource/templates/wiki_qa/templates.yaml
@@ -0,0 +1,225 @@
+dataset: wiki_qa
+templates:
+  148e8e91-4f38-4427-8806-8a407268cda9: !Template
+    answer_choices: No ||| Yes
+    id: 148e8e91-4f38-4427-8806-8a407268cda9
+    jinja: 'Question: {{question}}?
+
+      Would "{{answer}}" be a reasonable answer? |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is This True?
+    reference: The model is given both the question and the answer, and outputs
+      whether the answer is correct or not.
+  2395d5ce-5abd-4193-9cf1-863c7271a4f0: !Template
+    answer_choices: No ||| Yes
+    id: 2395d5ce-5abd-4193-9cf1-863c7271a4f0
+    jinja: 'I am verifying the answers generated by an automatic system to the following
+      question: {{question}}
+
+      Suggested answer: {{answer}}
+
+      Should I validate this answer?
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: automatic_system
+    reference: ''
+  3480df1e-88bb-4b3d-90df-3f292463eb76: !Template
+    answer_choices: null
+    id: 3480df1e-88bb-4b3d-90df-3f292463eb76
+    jinja: '{% if label == 1 %}
+
+      What is the question to: "{{answer}}"? The topic is {{document_title}}.|||
+
+      "{{question}}?"
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Jeopardy style
+    reference: Given a passage (an answer), generate the question.
+  8a9f2146-aa30-4e17-b1e2-aeb858b08b55: !Template
+    answer_choices: null
+    id: 8a9f2146-aa30-4e17-b1e2-aeb858b08b55
+    jinja: '{% if label == 1 %}
+
+      Determine the topic of the question-answer pair.
+
+      Question: "{{question}}?";  Answer: "{{answer}}"? Topic: |||
+
+      {{document_title}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Topic Prediction - Question and Answer Pair
+    reference: Given a correct Question-Answer pair, generate the topic.
+  a99a68fa-46ae-4331-8b97-fcf751db3f6f: !Template
+    answer_choices: null
+    id: a99a68fa-46ae-4331-8b97-fcf751db3f6f
+    jinja: '{% if label == 1 %}
+
+      Generate a question about the topic "{{document_title}}" whose answer would
+      be: {{answer}}.|||
+
+      {{question}}?
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Generate Question from Topic
+    reference: Given a topic, generate a question.
+  add469e1-b8d9-4926-8f38-3a60c85a7d2b: !Template
+    answer_choices: No ||| Yes
+    id: add469e1-b8d9-4926-8f38-3a60c85a7d2b
+    jinja: 'Question: {{question}}
+
+      I found the following answer on Google: {{answer}}
+
+      Is that a correct answer? Yes or no.
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: found_on_google
+    reference: ''
+  b0ad07f8-8799-4dd8-8f55-82f3f817f1fd: !Template
+    answer_choices: null
+    id: b0ad07f8-8799-4dd8-8f55-82f3f817f1fd
+    jinja: '{% if label == 1 %}
+
+      Determine the topic of the question.
+
+      Question: "{{question}}?"
+
+      Topic: |||
+
+      {{document_title}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Topic Prediction - Question Only
+    reference: Given a Question, generate the topic.
+  bfa3adac-d75b-4e09-aa92-dc38e334937f: !Template
+    answer_choices: False ||| True
+    id: bfa3adac-d75b-4e09-aa92-dc38e334937f
+    jinja: 'The exercise is to decide whether the question accepts the proposed suggestion
+      as a correct answer. If yes, write "{{answer_choices[1]}}", otherwise write
+      "{{answer_choices[0]}}".
+
+      Question: {{question}}
+
+      Suggestion: {{answer}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: exercise
+    reference: ''
+  c802cf19-59a7-4a3e-a6ab-5cbb1f169c70: !Template
+    answer_choices: No ||| Yes
+    id: c802cf19-59a7-4a3e-a6ab-5cbb1f169c70
+    jinja: 'This is a correct answer to the following question about {{document_title}}.
+      Yes or no?
+
+      Answer: {{answer}}
+
+      Question: {{question}}
+
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Decide_good_answer
+    reference: ''
+  cdc54124-723e-4e1c-878c-aeaabf55c28c: !Template
+    answer_choices: null
+    id: cdc54124-723e-4e1c-878c-aeaabf55c28c
+    jinja: '{% if label == 1 %}
+
+      Determine the topic of the passage.
+
+      "{{answer}}"
+
+      Topic:|||
+
+      {{document_title}}
+
+      {% endif %}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: false
+    name: Topic Prediction - Answer Only
+    reference: Given a correct Answer (as a text passage), generate the topic.
+  d827a178-ff54-4bbf-bc6d-8756950ae5c5: !Template
+    answer_choices: null
+    id: d827a178-ff54-4bbf-bc6d-8756950ae5c5
+    jinja: '{% if label == 1 %}
+
+      Answer this question: {{question}}?|||
+
+      {{answer}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - BLEU
+      - ROUGE
+      original_task: true
+    name: Direct Answer to Question
+    reference: Generates an answer given a question.
diff --git a/promptsource/templates/wiki_split/templates.yaml b/promptsource/templates/wiki_split/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49b5e1aeb04075c9f824c54bcaffab72065b16c0
--- /dev/null
+++ b/promptsource/templates/wiki_split/templates.yaml
@@ -0,0 +1,78 @@
+dataset: wiki_split
+templates:
+  50e32867-042a-480c-a55d-99f0d9d9bca6: !Template
+    answer_choices: null
+    id: 50e32867-042a-480c-a55d-99f0d9d9bca6
+    jinja: "The following sentence is pretty complex to understand.\n\n \"{{complex_sentence|replace(\"\
+      ' '' \",\"\")}}\"\n\nSimply this sentence into two simpler sentences |||\n\"\
+      {{simple_sentence_1|replace(\"' '' \",\"\")}}\" \n\n \"{{simple_sentence_2|replace(\"\
+      ' '' \",\"\")}}\""
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: simplify
+    reference: ''
+  6b67a1a2-4cfd-4052-b59b-a219ac7a543e: !Template
+    answer_choices: null
+    id: 6b67a1a2-4cfd-4052-b59b-a219ac7a543e
+    jinja: "In college-level essay writing courses, it is important to show sophistication\
+      \ in the use of the English language. So, it is important to form complex sentences\
+      \ that combine ideas.\nCombine the following two sentences to form one complex\
+      \ sentence.\n\n\"{{simple_sentence_1|replace(\"' '' \",\"\")}}\" \n\n \"{{simple_sentence_2|replace(\"\
+      ' '' \",\"\")}}\" |||\n\"{{complex_sentence|replace(\"' '' \",\"\")}}\""
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: complex-ify
+    reference: ''
+  96a38f85-90d4-438a-b4e9-ccb61310d5a8: !Template
+    answer_choices: null
+    id: 96a38f85-90d4-438a-b4e9-ccb61310d5a8
+    jinja: "Break down \"{{complex_sentence|replace(\"' '' \",\"\")}}\" into two simpler\
+      \ sentences |||\n\"{{simple_sentence_1|replace(\"' '' \",\"\")}}\" \n\n \"{{simple_sentence_2|replace(\"\
+      ' '' \",\"\")}}\""
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: separate
+    reference: ''
+  c5da1785-e4e3-4788-ad41-bc9255e137fa: !Template
+    answer_choices: null
+    id: c5da1785-e4e3-4788-ad41-bc9255e137fa
+    jinja: "\"{{complex_sentence|replace(\"' '' \",\"\")}}\" is made up of \"{{simple_sentence_1|replace(\"\
+      ' '' \",\"\")}}\"  and another sentence. What is the other sentence?|||\n\"\
+      {{simple_sentence_2|replace(\"' '' \",\"\")}}\" \n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: find A given B and C
+    reference: ''
+  da51aebf-e4c1-495b-b69e-0ed4eb646d37: !Template
+    answer_choices: null
+    id: da51aebf-e4c1-495b-b69e-0ed4eb646d37
+    jinja: 'Combine "{{simple_sentence_1|replace("'' '''' ","")}}" and "{{simple_sentence_2|replace("''
+      '''' ","")}}" to create a complex sentence |||
+
+      "{{complex_sentence|replace("'' '''' ","")}}"'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: combine
+    reference: ''
+  f021fe90-342d-4ff8-973f-8d3f6f04e572: !Template
+    answer_choices: null
+    id: f021fe90-342d-4ff8-973f-8d3f6f04e572
+    jinja: "\"{{complex_sentence|replace(\"' '' \",\"\")}}\" is made up of \"{{simple_sentence_2|replace(\"\
+      ' '' \",\"\")}}\"  and another sentence. What is the other sentence? |||\n\"\
+      {{simple_sentence_1|replace(\"' '' \",\"\")}}\" \n"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: find B given A and C
+    reference: ''
diff --git a/promptsource/templates/wino_bias/type1_anti/templates.yaml b/promptsource/templates/wino_bias/type1_anti/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..936e1c0a8397d1e65660ddaa8d75a885df7038f6
--- /dev/null
+++ b/promptsource/templates/wino_bias/type1_anti/templates.yaml
@@ -0,0 +1,148 @@
+dataset: wino_bias
+subset: type1_anti
+templates:
+  4faa9623-6d11-47d1-8d6e-bb41af088cff: !Template
+    answer_choices: null
+    id: 4faa9623-6d11-47d1-8d6e-bb41af088cff
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+      {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: replaced with
+    reference: ''
+  5e5c9f7b-2c07-42d7-baf2-925e91a5fb9b: !Template
+    answer_choices: null
+    id: 5e5c9f7b-2c07-42d7-baf2-925e91a5fb9b
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      What does "{{ pronoun }}" refer to in the following sentence?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: refers_to
+    reference: ''
+  5ea6715b-20b2-4f10-8122-54ed3af54763: !Template
+    answer_choices: null
+    id: 5ea6715b-20b2-4f10-8122-54ed3af54763
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      In the sentence below, what does "{{pronoun}}" represent?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: represent
+    reference: ''
+  8d5eedf2-de08-41fb-a584-7f35df315fd3: !Template
+    answer_choices: null
+    id: 8d5eedf2-de08-41fb-a584-7f35df315fd3
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: the pronoun refers to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  d102cd81-e0d1-46bf-9e7d-a620328ad3bf: !Template
+    answer_choices: null
+    id: d102cd81-e0d1-46bf-9e7d-a620328ad3bf
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: What does p stand for
+    reference: ''
+  d355811f-eb29-4e6e-9d57-299eea1d96e1: !Template
+    answer_choices: null
+    id: d355811f-eb29-4e6e-9d57-299eea1d96e1
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: by p they mean
+    reference: ''
+  f4bdb35d-ccb0-4482-a47e-603f8566301e: !Template
+    answer_choices: null
+    id: f4bdb35d-ccb0-4482-a47e-603f8566301e
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      {% if pronoun.lower()  == "they" or pronoun.lower() == "them" %}
+
+      Question: Who or what are "{{ pronoun }}"?
+
+      {% else %}
+
+      Question: Who or what is "{{ pronoun }}"?
+
+      {% endif %}
+
+      Answer: ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Who or what is/are
+    reference: ''
diff --git a/promptsource/templates/wino_bias/type1_pro/templates.yaml b/promptsource/templates/wino_bias/type1_pro/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..50549e1b6de54947f014b25c957c99a22f01eaf3
--- /dev/null
+++ b/promptsource/templates/wino_bias/type1_pro/templates.yaml
@@ -0,0 +1,148 @@
+dataset: wino_bias
+subset: type1_pro
+templates:
+  13b2dbe4-abf3-4b09-b7cb-459224881800: !Template
+    answer_choices: null
+    id: 13b2dbe4-abf3-4b09-b7cb-459224881800
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      What does "{{ pronoun }}" refer to in the following sentence?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: refers_to
+    reference: ''
+  13b2dbe4-abf3-4b09-b7cb-459224881801: !Template
+    answer_choices: null
+    id: 13b2dbe4-abf3-4b09-b7cb-459224881801
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      In the sentence below, what does "{{pronoun}}" represent?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: represent
+    reference: ''
+  143449f6-350a-44ef-ab4d-857841eadaf8: !Template
+    answer_choices: null
+    id: 143449f6-350a-44ef-ab4d-857841eadaf8
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+      {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: replaced with
+    reference: ''
+  18004871-0d0c-4f59-976c-53becd04c98f: !Template
+    answer_choices: null
+    id: 18004871-0d0c-4f59-976c-53becd04c98f
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      {% if pronoun.lower()  == "they" or pronoun.lower() == "them" %}
+
+      Question: Who or what are "{{ pronoun }}"?
+
+      {% else %}
+
+      Question: Who or what is "{{ pronoun }}"?
+
+      {% endif %}
+
+      Answer: ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Who or what is/are
+    reference: ''
+  1ab4e47e-bb58-47c4-8148-fcfaf4a75785: !Template
+    answer_choices: null
+    id: 1ab4e47e-bb58-47c4-8148-fcfaf4a75785
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: What does p stand for
+    reference: ''
+  97fb69f9-34d6-4fb2-bb60-75679c4a25c1: !Template
+    answer_choices: null
+    id: 97fb69f9-34d6-4fb2-bb60-75679c4a25c1
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: the pronoun refers to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  e5ac51e8-beaf-4cf9-a7fe-20d8cc2b1d0a: !Template
+    answer_choices: null
+    id: e5ac51e8-beaf-4cf9-a7fe-20d8cc2b1d0a
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: by p they mean
+    reference: ''
diff --git a/promptsource/templates/wino_bias/type2_anti/templates.yaml b/promptsource/templates/wino_bias/type2_anti/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..221142a078b395da85794b68d9cfa7c3d4bddaaf
--- /dev/null
+++ b/promptsource/templates/wino_bias/type2_anti/templates.yaml
@@ -0,0 +1,148 @@
+dataset: wino_bias
+subset: type2_anti
+templates:
+  3cdaa371-affb-48da-ba8f-f3dcb574fdcc: !Template
+    answer_choices: null
+    id: 3cdaa371-affb-48da-ba8f-f3dcb574fdcc
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      What does "{{ pronoun }}" refer to in the following sentence?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: refers_to
+    reference: ''
+  4ee240b3-482d-4f4c-8d87-7824b656d486: !Template
+    answer_choices: null
+    id: 4ee240b3-482d-4f4c-8d87-7824b656d486
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+      {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: replaced with
+    reference: ''
+  4f3a74bc-da74-4ee0-a3d4-a4387313102d: !Template
+    answer_choices: null
+    id: 4f3a74bc-da74-4ee0-a3d4-a4387313102d
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: What does p stand for
+    reference: ''
+  560ea974-4478-49c7-988e-f49853d45119: !Template
+    answer_choices: null
+    id: 560ea974-4478-49c7-988e-f49853d45119
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      In the sentence below, what does "{{pronoun}}" represent?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: represent
+    reference: ''
+  72c3f2ad-41b4-4aba-901e-b08a756b5cd2: !Template
+    answer_choices: null
+    id: 72c3f2ad-41b4-4aba-901e-b08a756b5cd2
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      {% if pronoun.lower()  == "they" or pronoun.lower() == "them" %}
+
+      Question: Who or what are "{{ pronoun }}"?
+
+      {% else %}
+
+      Question: Who or what is "{{ pronoun }}"?
+
+      {% endif %}
+
+      Answer: ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Who or what is/are
+    reference: ''
+  73750099-941c-4929-adb7-aaad3a8f3ac7: !Template
+    answer_choices: null
+    id: 73750099-941c-4929-adb7-aaad3a8f3ac7
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: by p they mean
+    reference: ''
+  7cb4282d-48ae-43fd-9075-e65e24980724: !Template
+    answer_choices: null
+    id: 7cb4282d-48ae-43fd-9075-e65e24980724
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: the pronoun refers to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
diff --git a/promptsource/templates/wino_bias/type2_pro/templates.yaml b/promptsource/templates/wino_bias/type2_pro/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..298f712d4bad5520e80369b4637d704febee1970
--- /dev/null
+++ b/promptsource/templates/wino_bias/type2_pro/templates.yaml
@@ -0,0 +1,148 @@
+dataset: wino_bias
+subset: type2_pro
+templates:
+  165a421e-6a90-4a7a-8ec5-06ae904ab46f: !Template
+    answer_choices: null
+    id: 165a421e-6a90-4a7a-8ec5-06ae904ab46f
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      {% if pronoun.lower()  == "they" or pronoun.lower() == "them" %}
+
+      Question: Who or what are "{{ pronoun }}"?
+
+      {% else %}
+
+      Question: Who or what is "{{ pronoun }}"?
+
+      {% endif %}
+
+      Answer: ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: Who or what is/are
+    reference: ''
+  25066e95-3782-44fc-949e-3620edd24a22: !Template
+    answer_choices: null
+    id: 25066e95-3782-44fc-949e-3620edd24a22
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      What does "{{ pronoun }}" refer to in the following sentence?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: refers_to
+    reference: ''
+  793c09af-1ec7-492a-ab65-392b0b17d807: !Template
+    answer_choices: null
+    id: 793c09af-1ec7-492a-ab65-392b0b17d807
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      In the sentence below, what does "{{pronoun}}" represent?
+
+      {{tokens | join(" ")}} ||| {{referent}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: represent
+    reference: ''
+  83446f7f-07ae-4b88-8aff-3eda1183dd7b: !Template
+    answer_choices: null
+    id: 83446f7f-07ae-4b88-8aff-3eda1183dd7b
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the previous sentence, the pronoun "{{ pronoun }}" can be replaced with |||
+      {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: replaced with
+    reference: ''
+  85a90e9b-a6ef-4e25-9577-f26f14350099: !Template
+    answer_choices: null
+    id: 85a90e9b-a6ef-4e25-9577-f26f14350099
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, by "{{ pronoun }}" they mean ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: by p they mean
+    reference: ''
+  ace9b776-df88-4895-b1e1-6821c5fcef72: !Template
+    answer_choices: null
+    id: ace9b776-df88-4895-b1e1-6821c5fcef72
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      Here, what does "{{ pronoun }}" stand for? ||| {{ referent }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: What does p stand for
+    reference: ''
+  af0b86f2-2fc6-4237-89da-d6d7dd2d9a40: !Template
+    answer_choices: null
+    id: af0b86f2-2fc6-4237-89da-d6d7dd2d9a40
+    jinja: '{% set pronoun = tokens[coreference_clusters[2] | int : coreference_clusters[3]
+      | int + 1] | join(" ") %}
+
+      {% set referent = tokens[coreference_clusters[0] | int : coreference_clusters[1]
+      | int + 1] | join(" ") %}
+
+      {{tokens | join(" ")}}
+
+      In the passage above, the pronoun "{{ pronoun }}" refers to ||| {{ referent
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Other
+      original_task: true
+    name: the pronoun refers to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
diff --git a/promptsource/templates/winograd_wsc/wsc273/templates.yaml b/promptsource/templates/winograd_wsc/wsc273/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..41193b5c40b4f4d7dccf1ef357a25c3ee7ffb177
--- /dev/null
+++ b/promptsource/templates/winograd_wsc/wsc273/templates.yaml
@@ -0,0 +1,109 @@
+dataset: winograd_wsc
+subset: wsc273
+templates:
+  18233597-fcd3-415d-a184-a971e98119d9: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 18233597-fcd3-415d-a184-a971e98119d9
+    jinja: '{{ text }} Here, does "{{ pronoun }}" stand for {{ answer_choices[0] }}
+      or {{ answer_choices[1] }}? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does p stand for
+    reference: ''
+  3f04226a-fb68-4d82-bda1-658f1a316365: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 3f04226a-fb68-4d82-bda1-658f1a316365
+    jinja: "{{ text }} \n{% if pronoun.lower()  == \"they\" or pronoun.lower() ==\
+      \ \"them\" %}\nQuestion: Who or what are \"{{ pronoun }}\"? {{ answer_choices[0]\
+      \ }} or {{ answer_choices[1] }}?\n{% else %}\nQuestion: Who or what is \"{{\
+      \ pronoun }}\"? Is it {{ answer_choices[0] }} or {{ answer_choices[1] }}?\n\
+      {% endif %}\nAnswer: ||| {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Who or what is/are
+    reference: ''
+  53603685-806f-4332-ae9a-e393b6ad2d89: !Template
+    answer_choices: '{{options | join("|||")}}'
+    id: 53603685-806f-4332-ae9a-e393b6ad2d89
+    jinja: '{{ text }} In the previous sentence, can the pronoun "{{pronoun }}" be
+      replaced with "{{ answer_choices[0] }}" or "{{ answer_choices[1] }}"? ||| {{
+      answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: replaced with
+    reference: ''
+  62aa8a33-2f62-43ec-aa7e-20d2052e2a8c: !Template
+    answer_choices: '{{ options | join("|||") }}'
+    id: 62aa8a33-2f62-43ec-aa7e-20d2052e2a8c
+    jinja: "{{ text }} \nIn the passage above, the pronoun \"{{ pronoun }}\" refers\
+      \ to {{ answer_choices[0] }} or {{ answer_choices[1] }}? ||| {{ answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: the pronoun refers to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  6e8a2985-ecc1-4184-86b9-929d2d25746e: !Template
+    answer_choices: '{{options | join("|||")}}'
+    id: 6e8a2985-ecc1-4184-86b9-929d2d25746e
+    jinja: "Context: {{ text }} \n\n{% if pronoun.lower()  == \"they\" or pronoun.lower()\
+      \ == \"them\" %}\nQuestion: \"{{ pronoun }}\" are {{ answer_choices[0] }} or\
+      \ {{ answer_choices[1] }}?\n{% else %}\nQuestion: \"{{ pronoun }}\" is {{ answer_choices[0]\
+      \ }} or {{ answer_choices[1] }}?\n{% endif %}\n\nAnswer: ||| {{ answer_choices[label]\
+      \ }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: p is/are r
+    reference: ''
+  7ee12960-5512-431a-b1eb-b3a975761f6c: !Template
+    answer_choices: '{{options | join("|||")}}'
+    id: 7ee12960-5512-431a-b1eb-b3a975761f6c
+    jinja: "Passage: {{ text }} \n\nQuestion: In the passage above, does the pronoun\
+      \ \"{{ pronoun }}\" refer to {{ answer_choices[0] }} or {{answer_choices[1]\
+      \ }}?\n\nAnswer: ||| {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: GPT-3 Style
+    reference: Adapted from Figure G33, p. 59, Brown et al. 2020
+  a0e4b805-e0bc-4b20-81bd-2b265ace8644: !Template
+    answer_choices: '{{options | join("|||")}}'
+    id: a0e4b805-e0bc-4b20-81bd-2b265ace8644
+    jinja: '{{ text }} In the previous sentence, does the pronoun "{{ pronoun }}"
+      refer to {{ answer_choices[0] }} or {{ answer_choices[1] }}? ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does the pronoun refer to
+    reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
+  e617cc59-9eca-4b17-ba2e-a87b79fe8c89: !Template
+    answer_choices: '{{options | join("|||")}}'
+    id: e617cc59-9eca-4b17-ba2e-a87b79fe8c89
+    jinja: '{{ text }} Here, by "{{ pronoun }}" do they mean "{{ answer_choices[0]
+      }}" or "{{ answer_choices[1]}}"? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: by p they mean
+    reference: ''
diff --git a/promptsource/templates/winograd_wsc/wsc285/templates.yaml b/promptsource/templates/winograd_wsc/wsc285/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..123822254ca1f6703cdbb9dfb15ecb59d4bfaaf8
--- /dev/null
+++ b/promptsource/templates/winograd_wsc/wsc285/templates.yaml
@@ -0,0 +1,58 @@
+dataset: winograd_wsc
+subset: wsc285
+templates:
+  2194ca96-203d-4306-8888-1093655ce825: !Template
+    answer_choices: null
+    id: 2194ca96-203d-4306-8888-1093655ce825
+    jinja: "Identify the phrase in \"{{text}}\" in which the key action or context\
+      \ surrounding the pronoun is described |||  \n{{quote}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: key_action
+    reference: ''
+  442e7f58-5378-4ebb-853e-db1dc2e3fd78: !Template
+    answer_choices: null
+    id: 442e7f58-5378-4ebb-853e-db1dc2e3fd78
+    jinja: "Who does the pronoun \"{{pronoun}}\" in \"{{text}}\" refer to?\n\nThe\
+      \ options are {{options | join(\" and \")}} |||  \n{{options[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: pronoun_options
+    reference: ''
+  46eaa2c6-b7d7-4c64-a7a2-a78aac7b3712: !Template
+    answer_choices: null
+    id: 46eaa2c6-b7d7-4c64-a7a2-a78aac7b3712
+    jinja: "Identify the pronoun in \"{{text}}\" |||  \n{{pronoun}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: identify_pronoun
+    reference: ''
+  5e26dc7e-c9c2-4392-94cf-0bda201d96f5: !Template
+    answer_choices: null
+    id: 5e26dc7e-c9c2-4392-94cf-0bda201d96f5
+    jinja: 'Who does the pronoun "{{pronoun}}" in "{{text}}" refer to? |||
+
+      {{options[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: pronoun
+    reference: ''
+  f73d23dd-440c-451e-8f2f-a6d8ad41c858: !Template
+    answer_choices: null
+    id: f73d23dd-440c-451e-8f2f-a6d8ad41c858
+    jinja: "Identify the pronoun in \"{{text}}\" and the entity it is referring to\
+      \ |||  \n\"{{pronoun}}\" which refers to the \"{{options[label]}}\""
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: identify_pronoun_entity
+    reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_debiased/templates.yaml b/promptsource/templates/winogrande/winogrande_debiased/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed8050daf3568f8875a26151c74f44f85a135c10
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_debiased/templates.yaml
@@ -0,0 +1,98 @@
+dataset: winogrande
+subset: winogrande_debiased
+templates:
+  1ce2be12-1815-4a07-80a7-ac3c3505b005: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 1ce2be12-1815-4a07-80a7-ac3c3505b005
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  1ddbbca4-8917-4a1d-9d83-f42db77f24ba: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 1ddbbca4-8917-4a1d-9d83-f42db77f24ba
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  276eaba6-17e5-403a-98c7-0f8c53c35221: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 276eaba6-17e5-403a-98c7-0f8c53c35221
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or  {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  9702d456-4261-4a7e-94c5-6a9d2a1c4859: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 9702d456-4261-4a7e-94c5-6a9d2a1c4859
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
+  bb9b91fc-760a-45cd-bacd-dcb05a1cb2f3: !Template
+    answer_choices: True ||| False
+    id: bb9b91fc-760a-45cd-bacd-dcb05a1cb2f3
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  ebabc54d-cff4-46a7-9c22-2412b8ce00c6: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: ebabc54d-cff4-46a7-9c22-2412b8ce00c6
+    jinja: 'Fill in the _ in the sentence below:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_l/templates.yaml b/promptsource/templates/winogrande/winogrande_l/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c105a3fbd01c19ecd8b224c513e9c51e3345c76
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_l/templates.yaml
@@ -0,0 +1,98 @@
+dataset: winogrande
+subset: winogrande_l
+templates:
+  035038df-0b71-45c1-b18f-14451c580508: !Template
+    answer_choices: True ||| False
+    id: 035038df-0b71-45c1-b18f-14451c580508
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  1ceacaa7-ccd3-4e4e-ad0d-c75b241e0ddb: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 1ceacaa7-ccd3-4e4e-ad0d-c75b241e0ddb
+    jinja: 'Fill in the _ in the sentence below:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  5627ba11-110c-4871-a0ed-86e7e66fec60: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 5627ba11-110c-4871-a0ed-86e7e66fec60
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or  {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  57030ec2-f026-491d-88bc-c2709455cc56: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 57030ec2-f026-491d-88bc-c2709455cc56
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  c920f420-f80d-4e94-9024-b45fbf4d6367: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: c920f420-f80d-4e94-9024-b45fbf4d6367
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
+  edb3168f-ce82-4b9e-9713-6a581f5aef96: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: edb3168f-ce82-4b9e-9713-6a581f5aef96
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_m/templates.yaml b/promptsource/templates/winogrande/winogrande_m/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb7b4f25b5a0c32794d489253c46d60fb8e9c75f
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_m/templates.yaml
@@ -0,0 +1,98 @@
+dataset: winogrande
+subset: winogrande_m
+templates:
+  5170abad-7046-4538-8216-68cc508d3d23: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 5170abad-7046-4538-8216-68cc508d3d23
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  5c801298-c08f-4165-8c25-38592e341a1c: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 5c801298-c08f-4165-8c25-38592e341a1c
+    jinja: 'Fill in the _ in the sentence below:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  894b5a5b-6685-462b-ad00-ea82f4fb80e2: !Template
+    answer_choices: True ||| False
+    id: 894b5a5b-6685-462b-ad00-ea82f4fb80e2
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  a394fafe-260c-4473-8b67-a4ecfc7fe1fd: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: a394fafe-260c-4473-8b67-a4ecfc7fe1fd
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or  {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  ad345e36-5dc7-4a0b-a9f4-654f253e3c20: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: ad345e36-5dc7-4a0b-a9f4-654f253e3c20
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  ead1b8bf-7c42-4320-86ec-3a7c7aef14cb: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: ead1b8bf-7c42-4320-86ec-3a7c7aef14cb
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_s/templates.yaml b/promptsource/templates/winogrande/winogrande_s/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7940a9924a1f52bff4fd09588729d3a3d16970d0
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_s/templates.yaml
@@ -0,0 +1,98 @@
+dataset: winogrande
+subset: winogrande_s
+templates:
+  1a150f71-aba6-4e7d-9be2-dce2df84c5de: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 1a150f71-aba6-4e7d-9be2-dce2df84c5de
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or  {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  85d97c81-9d82-4df5-91db-56a5459b61cd: !Template
+    answer_choices: True ||| False
+    id: 85d97c81-9d82-4df5-91db-56a5459b61cd
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  968aa4aa-67d7-41ca-8ff2-462d482f4d89: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 968aa4aa-67d7-41ca-8ff2-462d482f4d89
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
+  d304fb01-a60a-4846-9378-394f84f05d85: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: d304fb01-a60a-4846-9378-394f84f05d85
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  d715126d-1cd3-4fc0-bd32-945d8c1af800: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: d715126d-1cd3-4fc0-bd32-945d8c1af800
+    jinja: 'Fill in the _ in the sentence below:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  d8e01dcd-ec07-40a3-a642-8446f81f700a: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: d8e01dcd-ec07-40a3-a642-8446f81f700a
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_xl/templates.yaml b/promptsource/templates/winogrande/winogrande_xl/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c47004153a659524c6e9ac0947cad6a275432d5a
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_xl/templates.yaml
@@ -0,0 +1,98 @@
+dataset: winogrande
+subset: winogrande_xl
+templates:
+  0f23f058-5b4d-42a3-92d4-5d60688aa90c: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 0f23f058-5b4d-42a3-92d4-5d60688aa90c
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or  {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  5080f912-fac8-400f-983c-944baf9b10c0: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 5080f912-fac8-400f-983c-944baf9b10c0
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
+  5af00ba1-86e0-421b-bb97-26bf58df52d3: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 5af00ba1-86e0-421b-bb97-26bf58df52d3
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  ac4e5cb4-f874-460a-8578-ddf1c6541bb4: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: ac4e5cb4-f874-460a-8578-ddf1c6541bb4
+    jinja: 'Fill in the _ in the sentence below:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
+  d11378d1-2f24-4509-bbbc-bfa2921300d5: !Template
+    answer_choices: True ||| False
+    id: d11378d1-2f24-4509-bbbc-bfa2921300d5
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  e7e42e9e-bc57-46ed-ad8a-76a5b90a5bb9: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: e7e42e9e-bc57-46ed-ad8a-76a5b90a5bb9
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
diff --git a/promptsource/templates/winogrande/winogrande_xs/templates.yaml b/promptsource/templates/winogrande/winogrande_xs/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dfefa4fc749dcd8d7241c0b3b6e6dc567739010f
--- /dev/null
+++ b/promptsource/templates/winogrande/winogrande_xs/templates.yaml
@@ -0,0 +1,98 @@
+dataset: winogrande
+subset: winogrande_xs
+templates:
+  52b40d2b-7547-44e2-8cc6-eb127ecbb2fe: !Template
+    answer_choices: '{{ option1 }} ||| {{ option2 }}'
+    id: 52b40d2b-7547-44e2-8cc6-eb127ecbb2fe
+    jinja: '{{ sentence }} In the previous sentence, does _ refer to {{ option1 }}
+      or  {{ option2 }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2
+      }} {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: does underscore refer to
+    reference: ''
+  75072f4d-843b-4ba4-96a5-cd0ea3e7855d: !Template
+    answer_choices: True ||| False
+    id: 75072f4d-843b-4ba4-96a5-cd0ea3e7855d
+    jinja: 'The _ in the sentence below refers to {{option1}}. True or False?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer|int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: false
+    name: True or False
+    reference: ''
+  8a976d84-efbc-47c0-8e3d-8e3cf89c3e2c: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 8a976d84-efbc-47c0-8e3d-8e3cf89c3e2c
+    jinja: '{{sentence}}
+
+      What does the _ in the above sentence refer to? {{ option1 }} or {{ option2
+      }}? ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {% endif
+      %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: underscore refer to
+    reference: ''
+  9ee3a3b0-d84f-4d66-bb79-de82ac5040b2: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: 9ee3a3b0-d84f-4d66-bb79-de82ac5040b2
+    jinja: 'In the sentence below, does the _ stand for {{answer_choices[0]}} or {{answer_choices[1]}}?
+
+      {{sentence}}|||
+
+      {{answer_choices[answer | int - 1]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: stand for
+    reference: ''
+  eb63f6df-99d9-4a00-a165-976e93c7271f: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: eb63f6df-99d9-4a00-a165-976e93c7271f
+    jinja: "{{sentence}}\nReplace the _ in the above sentence with the correct option:\
+      \ \n- {{option1}}\n- {{option2}}\n|||\n{% if answer == '1' %} {{option1}} {%\
+      \ else %} {{ option2 }} {% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Replace
+    reference: ''
+  f87367de-59ca-4859-abe0-26521a77fc67: !Template
+    answer_choices: '{{option1}} ||| {{option2}}'
+    id: f87367de-59ca-4859-abe0-26521a77fc67
+    jinja: 'Fill in the _ in the sentence below:
+
+      {{sentence}}
+
+
+      Choices:
+
+      - {{ option1 }}
+
+      - {{ option2 }}
+
+
+      Answer: ||| {% if answer == ''1'' %} {{option1}} {% else %} {{ option2 }} {%
+      endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics:
+      - Accuracy
+      original_task: true
+    name: fill in the blank
+    reference: ''
diff --git a/promptsource/templates/wiqa/templates.yaml b/promptsource/templates/wiqa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6fb3f8e364a4287b2dfb24d3521a2c13cba5856a
--- /dev/null
+++ b/promptsource/templates/wiqa/templates.yaml
@@ -0,0 +1,211 @@
+dataset: wiqa
+templates:
+  1bc8d95b-0a50-49f4-a46b-bd752929926d: !Template
+    answer_choices: null
+    id: 1bc8d95b-0a50-49f4-a46b-bd752929926d
+    jinja: '-  {{ question_para_step[1:] | join("\n- ") }}
+
+
+      What might be the first step of the process?
+
+
+      |||
+
+
+      {{ question_para_step | first }}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: false
+    name: what_might_be_the_first_step_of_the_process
+    reference: ''
+  360cd99a-2f83-469a-a505-d80808159dd2: !Template
+    answer_choices: null
+    id: 360cd99a-2f83-469a-a505-d80808159dd2
+    jinja: '
+
+      {% set process_list = question_para_step[:-1] if question_para_step[-1] == ""
+      else question_para_step %}
+
+      -  {{ process_list[:-1] | join("\n- ") }}
+
+
+      What might be the last step of the process?
+
+
+      |||
+
+
+      {{ process_list | last }}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: false
+    name: what_might_be_the_last_step_of_the_process
+    reference: ''
+  4191b162-9220-46e5-a2f0-0a763eef55a0: !Template
+    answer_choices: null
+    id: 4191b162-9220-46e5-a2f0-0a763eef55a0
+    jinja: 'What is the missing first step of the following process:
+
+
+      -  {{ question_para_step[1:] | join("\n- ") }}
+
+
+      |||
+
+
+      {{ question_para_step | first }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: false
+    name: what_is_the_missing_first_step
+    reference: ''
+  52d69c02-5ff3-4fe7-bcaf-a6b995a15020: !Template
+    answer_choices: null
+    id: 52d69c02-5ff3-4fe7-bcaf-a6b995a15020
+    jinja: ' {% set process_list = question_para_step[:-1] if question_para_step[-1]
+      == "" else question_para_step %}
+
+      What is the final step of the following process:
+
+      -  {{ process_list[:-1] | join("\n- ") }}
+
+
+      |||
+
+
+      {{ process_list | last }}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: false
+    name: what_is_the_final_step_of_the_following_process
+    reference: ''
+  5dfee2c2-9742-4003-8ab6-dfe0ce5a745b: !Template
+    answer_choices: null
+    id: 5dfee2c2-9742-4003-8ab6-dfe0ce5a745b
+    jinja: 'Process:
+
+      - {{ question_para_step | join("\n- ")}}
+
+
+      Question:
+
+      {{question_stem}}
+
+
+      How does the supposed perturbation influence the second effect mentioned? Answer
+      by {{"more, less or no effect"}}
+
+
+      |||
+
+
+      {{answer_label|replace("_", " ")}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: effect_with_string_answer
+    reference: ''
+  667c291f-6a36-4334-aa49-804c9e72500b: !Template
+    answer_choices: null
+    id: 667c291f-6a36-4334-aa49-804c9e72500b
+    jinja: 'Process:
+
+
+      - {{ question_para_step | join("\n- ") }}
+
+
+      {{question_stem}}
+
+
+      Which of the following is the supposed perturbation?
+
+
+      - {{"directly impacting a step of the process"}}
+
+      - {{"indirectly impacting a step of the process"}}
+
+      - {{"not impacting any step of the process"}}
+
+
+
+      |||
+
+
+      {{{"EXOGENOUS_EFFECT": "indirectly impacting a step of the process", "OUTOFPARA_DISTRACTOR":
+      "not impacting any step of the process", "INPARA_EFFECT": "directly impacting
+      a step of the process"}[metadata_question_type]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: false
+    name: which_of_the_following_is_the_supposed_perturbation
+    reference: ''
+  6cf2b300-6790-4120-9592-9db63bec221b: !Template
+    answer_choices: null
+    id: 6cf2b300-6790-4120-9592-9db63bec221b
+    jinja: 'Process:
+
+      - {{ question_para_step | join("\n- ")}}
+
+
+      Question:
+
+      {{question_stem}}
+
+
+      - {{"A: more"}}
+
+      - {{"B: less"}}
+
+      - {{"C: no effect"}}
+
+
+      |||
+
+
+      {{answer_label_as_choice}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      metrics: []
+      original_task: true
+    name: effect_with_label_answer
+    reference: ''
+  a17313bd-94bb-47ab-82bf-538df1b1ad5f: !Template
+    answer_choices: null
+    id: a17313bd-94bb-47ab-82bf-538df1b1ad5f
+    jinja: 'Process:
+
+
+      - {{ question_para_step | join("\n- ") }}
+
+
+      Perturbation hypothesis:
+
+      {{question_stem}}
+
+
+      Does the supposed perturbation have an effect (direct or indirect) on the process?
+
+
+      |||
+
+
+      {{{"EXOGENOUS_EFFECT": "yes", "OUTOFPARA_DISTRACTOR": "no", "INPARA_EFFECT":
+      "yes"}[metadata_question_type]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics: []
+      original_task: false
+    name: does_the_supposed_perturbation_have_an_effect
+    reference: ''
diff --git a/promptsource/templates/xnli/en/templates.yaml b/promptsource/templates/xnli/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..729c6639aeaf65afc4817782906758899d416964
--- /dev/null
+++ b/promptsource/templates/xnli/en/templates.yaml
@@ -0,0 +1,120 @@
+dataset: xnli
+subset: en
+templates:
+  4e122d26-7e79-49c0-961b-cf8ee134759e: !Template
+    answer_choices: No ||| Neutral ||| Yes
+    id: 4e122d26-7e79-49c0-961b-cf8ee134759e
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 contradict Sentence 2? Yes, No, or {{"Neutral"}}?
+      |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Concatenation contraposition
+    reference: Concatenation contraposition
+  a1506390-8d7a-4b8f-922c-6135e23094d7: !Template
+    answer_choices: must be true ||| might be true ||| must be false
+    id: a1506390-8d7a-4b8f-922c-6135e23094d7
+    jinja: Given that {{premise}}, it {{"must be true, might be true, or must be false"}}
+      that {{hypothesis}}? ||| It {{ answer_choices[label] }}.
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 must be true that\u2026"
+    reference: 'Maybe a little verbose for a generative model, but anecdotally this
+      is the most natural way to say an NLI sentence pair out loud to humans.
+      Caveat: NLI annotations are not meant to be strictly truth-conditional entailment,
+      so "must" is not ideal.'
+  c62a3048-018e-4d93-bc46-645f3f763ee6: !Template
+    answer_choices: No ||| No ||| Yes
+    id: c62a3048-018e-4d93-bc46-645f3f763ee6
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 contradict Sentence 2? Yes or No? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Label binarization contraposition
+    reference: Inspired by https://arxiv.org/pdf/1902.01007.pdf Section 4 - Implementation
+      and evaluation
+  d027ab50-a86b-45ba-99fa-018c0e4cac4a: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: d027ab50-a86b-45ba-99fa-018c0e4cac4a
+    jinja: Given that {{premise}} Does it follow that {{hypothesis}} Yes, no, or maybe?
+      ||| {{ answer_choices[label] }}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: "given\u2026 does it follow that\u2026 "
+    reference: "\"Does it follow that\" could be replaced with \"can we infer that\u2026\
+      \ \", \"is it guaranteed that\u2026\", etc. Ideally there should be a question\
+      \ mark after \"does it follow that {hypothesis}?\", but the hypothesis string\
+      \ often comes with ending punctuations of its own."
+  d9e13133-267e-46c4-afad-c2379dcc5272: !Template
+    answer_choices: True ||| Neither ||| False
+    id: d9e13133-267e-46c4-afad-c2379dcc5272
+    jinja: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither? ||| \n\
+      {{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: ANLI GPT3
+    reference: ANLI prompt format from Table G7 in the GPT3 paper
+  dd4276e6-aebd-44a3-b3cf-baf8a4c237f0: !Template
+    answer_choices: Yes ||| No ||| No
+    id: dd4276e6-aebd-44a3-b3cf-baf8a4c237f0
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 entail Sentence 2? Yes or No? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Label binarization
+    reference: Grouping "neutral" and "contradiction" as a single label following
+      https://arxiv.org/pdf/1902.01007.pdf Section 4 - Implementation and evaluation
+  ddfd2eb1-96a4-42db-ad5e-91d7cb011b4e: !Template
+    answer_choices: Yes ||| Maybe ||| No
+    id: ddfd2eb1-96a4-42db-ad5e-91d7cb011b4e
+    jinja: '{{premise}} Based on the previous passage, is it true that {{hypothesis}}
+      Yes, no, or maybe? ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: based on the previous passage
+    reference: "Adapted from the BoolQ prompts in Schick & Sch\xFCtze 2021."
+  e174f56a-b0af-4937-b6ae-1897cac26eba: !Template
+    answer_choices: Yes ||| Neutral ||| No
+    id: e174f56a-b0af-4937-b6ae-1897cac26eba
+    jinja: 'Sentence 1: {{premise}}
+
+      Sentence 2: {{hypothesis}}
+
+      Question: Does Sentence 1 entail Sentence 2? Yes, No, or {{"Neutral"}}? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Concatenation
+    reference: Concatenation of premise and hypothesis
diff --git a/promptsource/templates/xquad/xquad.en/templates.yaml b/promptsource/templates/xquad/xquad.en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d0719fe6f631c73f849f4b2141ed4127510e5bd
--- /dev/null
+++ b/promptsource/templates/xquad/xquad.en/templates.yaml
@@ -0,0 +1,94 @@
+dataset: xquad
+subset: xquad.en
+templates:
+  10efb2e0-390c-4bab-9dc7-d90db707b6ae: !Template
+    answer_choices: null
+    id: 10efb2e0-390c-4bab-9dc7-d90db707b6ae
+    jinja: '{{context}}
+
+
+      Generate a question from the above passage: ||| {{question}}
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: temp-4
+    reference: ''
+  120fffe0-b752-43f8-bf50-ecf009703ef0: !Template
+    answer_choices: null
+    id: 120fffe0-b752-43f8-bf50-ecf009703ef0
+    jinja: '{{context}}
+
+
+      Q: {{question}}
+
+
+      Referring to the passage above, the correct answer to the given question is
+      ||| {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp-5
+    reference: ''
+  32a9896f-34d5-4bde-8843-6d01d4621016: !Template
+    answer_choices: null
+    id: 32a9896f-34d5-4bde-8843-6d01d4621016
+    jinja: "{{context}}\n\nWith reference to the above context, {{question}} ||| \n\
+      \n{{answers[\"text\"][0]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: basic- answer the question
+    reference: ''
+  4bae0661-a3e5-448a-bfa2-69b096b01283: !Template
+    answer_choices: null
+    id: 4bae0661-a3e5-448a-bfa2-69b096b01283
+    jinja: '{{context}}
+
+
+      From the above passage, a reasonable question with "{{answers["text"][0]}}" as
+      the answer would be: ||| {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: jeopardy
+    reference: jeopardy style- wiki_qa
+  90b53380-5c3b-4884-8cd1-9b4316da7993: !Template
+    answer_choices: null
+    id: 90b53380-5c3b-4884-8cd1-9b4316da7993
+    jinja: 'Refer to the passage below and answer the following question:
+
+
+      Passage: {{context}}
+
+
+      Question: {{question}}
+
+      |||
+
+      {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp-2
+    reference: ''
+  9cff064e-97e0-4026-94bc-3f7987856ec7: !Template
+    answer_choices: null
+    id: 9cff064e-97e0-4026-94bc-3f7987856ec7
+    jinja: '{{context}}
+
+
+      Q: {{question}}
+
+
+      A: ||| {{answers["text"][0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: temp-3
+    reference: ''
diff --git a/promptsource/templates/xquad_r/en/templates.yaml b/promptsource/templates/xquad_r/en/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5795b4c2c8f95c6bc5ec00c7231b2519b1063c56
--- /dev/null
+++ b/promptsource/templates/xquad_r/en/templates.yaml
@@ -0,0 +1,142 @@
+dataset: xquad_r
+subset: en
+templates:
+  4c005896-9534-47fe-a765-b2fa8666ea85: !Template
+    answer_choices: null
+    id: 4c005896-9534-47fe-a765-b2fa8666ea85
+    jinja: 'Text: {{context}}
+
+
+      Find the answer to the question below in the text above.
+
+
+      Question: {{question}} |||
+
+      {{answers.text[0]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template 2
+    reference: ''
+  50fc3bd0-7dcd-40e6-b73d-8c255718ba20: !Template
+    answer_choices: null
+    id: 50fc3bd0-7dcd-40e6-b73d-8c255718ba20
+    jinja: 'Given a paragraph and a question, can you find the answer in the paragraph?
+
+
+      {{context}}
+
+
+      {{question}}|||
+
+      {{answers.text[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template 5
+    reference: ''
+  5417ae40-e0ac-40c7-bbbf-f999611b0e03: !Template
+    answer_choices: null
+    id: 5417ae40-e0ac-40c7-bbbf-f999611b0e03
+    jinja: '{{question}}
+
+
+      Refer to the context below and answer the given question.
+
+
+      {{context}}|||
+
+      {{answers.text[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template 6
+    reference: ''
+  5b3b8c55-b270-40d9-ae97-430133528aa9: !Template
+    answer_choices: null
+    id: 5b3b8c55-b270-40d9-ae97-430133528aa9
+    jinja: "Given the following paragraph, frame a question for which the answer would\
+      \ be: \n\n{{answers.text[0]}}\n\n{{context}}|||\n{{question}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template 8
+    reference: ''
+  8db592de-7185-4e3d-922d-f6c9aa22cd8f: !Template
+    answer_choices: null
+    id: 8db592de-7185-4e3d-922d-f6c9aa22cd8f
+    jinja: '{{context}}
+
+
+      {{question}}
+
+
+      What is the answer to the given query?|||
+
+
+      {{answers.text[0]}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template 3
+    reference: ''
+  b04111f8-d139-4b47-90ae-cc255998951f: !Template
+    answer_choices: null
+    id: b04111f8-d139-4b47-90ae-cc255998951f
+    jinja: '{{context}}
+
+
+      What is the question for which the answer is ''{{answers.text[0]}}''? |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: Template 9
+    reference: ''
+  eb0e4eb2-8235-42de-a22a-7ea1cfe98f69: !Template
+    answer_choices: null
+    id: eb0e4eb2-8235-42de-a22a-7ea1cfe98f69
+    jinja: 'Read the paragraph and answer the question that follows.
+
+
+      {{context}}
+
+
+      {{question}} |||
+
+      {{answers.text[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template 1
+    reference: ''
+  f6442d4b-02bb-4cdf-95f6-f148253b8b5b: !Template
+    answer_choices: null
+    id: f6442d4b-02bb-4cdf-95f6-f148253b8b5b
+    jinja: '{{context}}
+
+
+      {{question}}
+
+
+      What is the answer to the question?|||
+
+      Answer: {{answers.text[0]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: Template 7
+    reference: ''
diff --git a/promptsource/templates/xsum/templates.yaml b/promptsource/templates/xsum/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f2143ed2caa4cbe07ffab04b5d0b20c59f4eb792
--- /dev/null
+++ b/promptsource/templates/xsum/templates.yaml
@@ -0,0 +1,160 @@
+dataset: xsum
+templates:
+  13c02904-e4e2-4b4f-b115-44b437d22041: !Template
+    answer_choices: null
+    id: 13c02904-e4e2-4b4f-b115-44b437d22041
+    jinja: '{{document}}
+
+
+      ===
+
+
+      Write a summary of the text above: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: DOC_write_summary_of_above
+    reference: ''
+  30292806-8e58-463c-8d92-ba525411c6fa: !Template
+    answer_choices: null
+    id: 30292806-8e58-463c-8d92-ba525411c6fa
+    jinja: 'Article: {{document}}
+
+
+      Summary: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: article_DOC_summary
+    reference: Prefix-Tuning
+  3d388a1e-3361-407b-baa7-61397cc58382: !Template
+    answer_choices: null
+    id: 3d388a1e-3361-407b-baa7-61397cc58382
+    jinja: '{{document}}
+
+      How would you rephrase that in a few words? ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: DOC_how_would_you_rephrase_few_words
+    reference: http://gptprompts.wikidot.com/prompt:summarization
+  4cfe4126-b9f5-44eb-8a98-973987c5f32e: !Template
+    answer_choices: null
+    id: 4cfe4126-b9f5-44eb-8a98-973987c5f32e
+    jinja: 'My college roommate asked me what this article means:
+
+
+      {{document}}
+
+
+      So I recapped it in layman''s terms: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: college_roommate_asked_DOC_so_I_recap
+    reference: http://gptprompts.wikidot.com/prompt:summarization
+  57a7a3f1-91f8-4f4b-b72d-745d7cb7b1e3: !Template
+    answer_choices: null
+    id: 57a7a3f1-91f8-4f4b-b72d-745d7cb7b1e3
+    jinja: '{{document}}
+
+      This boils down to the simple idea that ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: DOC_boils_down_to_simple_idea_that
+    reference: http://gptprompts.wikidot.com/prompt:summarization
+  65a3c419-57e9-48c2-b090-0c5d7adb23c6: !Template
+    answer_choices: null
+    id: 65a3c419-57e9-48c2-b090-0c5d7adb23c6
+    jinja: 'Summarize: {{document}}|||
+
+      {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: summarize_DOC
+    reference: ''
+  752fda48-e64c-47a7-8342-17c2c113f600: !Template
+    answer_choices: null
+    id: 752fda48-e64c-47a7-8342-17c2c113f600
+    jinja: 'Summarize this document: {{document}}
+
+      Summary: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: summarize_this_DOC_summary
+    reference: ''
+  826ffcd4-c0e6-4f4c-bd9a-fcf8ee169ede: !Template
+    answer_choices: null
+    id: 826ffcd4-c0e6-4f4c-bd9a-fcf8ee169ede
+    jinja: '{{document}}
+
+
+      ===
+
+
+      Given the above document, write one sentence to summarize: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: DOC_given_above_write_one_sentence
+    reference: ''
+  9a3f617f-628f-4fa5-9b74-47d0b166a487: !Template
+    answer_choices: null
+    id: 9a3f617f-628f-4fa5-9b74-47d0b166a487
+    jinja: 'First, please read the article below.
+
+
+      {{document}}
+
+
+      Now, can you write me an extremely short abstract for it? ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: read_below_DOC_write_abstract
+    reference: ''
+  d878b768-9da2-4d9d-9517-1edcca3b1b26: !Template
+    answer_choices: null
+    id: d878b768-9da2-4d9d-9517-1edcca3b1b26
+    jinja: '{{document}}
+
+
+      TL;DR: ||| {{summary}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - ROUGE
+      - BLEU
+      original_task: true
+    name: DOC_tldr
+    reference: GPT-2 TLDR
diff --git a/promptsource/templates/yahoo_answers_qa/templates.yaml b/promptsource/templates/yahoo_answers_qa/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..954d37792ffff625dc9e61233a351547feb46fa5
--- /dev/null
+++ b/promptsource/templates/yahoo_answers_qa/templates.yaml
@@ -0,0 +1,71 @@
+dataset: yahoo_answers_qa
+templates:
+  1a6eda86-debc-4681-b643-f1f16fedd713: !Template
+    answer_choices: null
+    id: 1a6eda86-debc-4681-b643-f1f16fedd713
+    jinja: "{% if nbestanswers|length > 1 %} \n\nGive the top two answers for the\
+      \ following question:\n\n{{question}} |||\nThe best answer is {{nbestanswers[0]}}\
+      \ and the second best answer is {{nbestanswers[1]}}\n\n{% endif %}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: n_best_answer
+    reference: ''
+  2c214261-f32b-42ab-ac90-b22f4f2f465f: !Template
+    answer_choices: null
+    id: 2c214261-f32b-42ab-ac90-b22f4f2f465f
+    jinja: 'What is the category of the following question?
+
+
+      {{question}} |||
+
+      {{main_category}}
+
+      '
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: category
+    reference: ''
+  7ba7a99c-be14-47d4-859d-093ce07a2798: !Template
+    answer_choices: null
+    id: 7ba7a99c-be14-47d4-859d-093ce07a2798
+    jinja: 'Given the top answers, {{ nbestanswers|join(", ") }}, generate the question
+      |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate_question
+    reference: ''
+  7fc9307b-99ad-457a-8b60-c44bd6b2d86c: !Template
+    answer_choices: null
+    id: 7fc9307b-99ad-457a-8b60-c44bd6b2d86c
+    jinja: 'Given the best answer, "{{answer}}", generate the question |||
+
+      {{question}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_question
+    reference: ''
+  f0eeb5cb-1364-44c2-b284-de67452aef6d: !Template
+    answer_choices: null
+    id: f0eeb5cb-1364-44c2-b284-de67452aef6d
+    jinja: 'What is the best answer for the following question?
+
+
+      {{question}} |||
+
+      {{answer}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: best_answer
+    reference: ''
diff --git a/promptsource/templates/yahoo_answers_topics/templates.yaml b/promptsource/templates/yahoo_answers_topics/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7094f50ff0519c28330531326b57d21fae090f54
--- /dev/null
+++ b/promptsource/templates/yahoo_answers_topics/templates.yaml
@@ -0,0 +1,84 @@
+dataset: yahoo_answers_topics
+templates:
+  21e4d34e-0f80-4056-8870-bb303ba12dca: !Template
+    answer_choices: null
+    id: 21e4d34e-0f80-4056-8870-bb303ba12dca
+    jinja: 'Given the answer below, suggest a possible question title:
+
+
+      Answer: {{ best_answer}} |||
+
+      {{ question_title}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answer_from_qn
+    reference: ''
+  253315cd-dd9d-4c15-82a0-9c8138a70d94: !Template
+    answer_choices: null
+    id: 253315cd-dd9d-4c15-82a0-9c8138a70d94
+    jinja: 'Given the following email body which contains details for a question,
+      suggest a subject for this email:
+
+
+      Email body: {{question_content}} |||
+
+      {{ question_title }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: subject
+    reference: ''
+  32ca4b76-fbb1-4846-94c9-9e968c627ed9: !Template
+    answer_choices: null
+    id: 32ca4b76-fbb1-4846-94c9-9e968c627ed9
+    jinja: 'Given the question title and the question content, answer the question:
+
+
+      Question Title: {{ question_title }}
+
+
+      Question content: {{ question_content }} |||
+
+      {{ best_answer }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: qa
+    reference: ''
+  568a5d5e-ec5c-4ceb-9620-df5e86280143: !Template
+    answer_choices: null
+    id: 568a5d5e-ec5c-4ceb-9620-df5e86280143
+    jinja: "{% set topics = [\n  \"Society & Culture\",\n  \"Science & Mathematics\"\
+      ,\n  \"Health\",\n  \"Education & Reference\",\n  \"Computers & Internet\",\n\
+      \  \"Sports\",\n  \"Business & Finance\",\n  \"Entertainment & Music\",\n  \"\
+      Family & Relationships\",\n  \"Politics & Government\"\n] %}\nGiven the question\
+      \ title and the question content, classify the question into one of these topics,\
+      \ {{topics|join(', ')}}:\n\nQuestion Title: {{ question_title }}\n\nQuestion\
+      \ content: {{ question_content }} |||\n{{ topics[topic]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: topic
+    reference: ''
+  b56d1289-d3df-4e66-88b5-737dce09b467: !Template
+    answer_choices: null
+    id: b56d1289-d3df-4e66-88b5-737dce09b467
+    jinja: "{% set topics = [\n  \"Society & Culture\",\n  \"Science & Mathematics\"\
+      ,\n  \"Health\",\n  \"Education & Reference\",\n  \"Computers & Internet\",\n\
+      \  \"Sports\",\n  \"Business & Finance\",\n  \"Entertainment & Music\",\n  \"\
+      Family & Relationships\",\n  \"Politics & Government\"\n] %}\nGiven the question\
+      \ title below, classify the question into one of these topics, {{topics|join(',\
+      \ ')}} :\n\nQuestion Title: {{ question_title }} |||\n{{ topics[topic]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: title_class
+    reference: ''
diff --git a/promptsource/templates/yelp_polarity/templates.yaml b/promptsource/templates/yelp_polarity/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..340a95875c5faf22f18b5add68dd9b6f9a259416
--- /dev/null
+++ b/promptsource/templates/yelp_polarity/templates.yaml
@@ -0,0 +1,110 @@
+dataset: yelp_polarity
+templates:
+  01dc166f-0774-4be0-b606-2beb2252d9b5: !Template
+    answer_choices: bad ||| good
+    id: 01dc166f-0774-4be0-b606-2beb2252d9b5
+    jinja: '{{ text }} Overall, the experience is ||| {{ answer_choices[label] }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: experience_good_bad
+    reference: ''
+  13f829c1-ca58-4efd-b1f0-14446b176871: !Template
+    answer_choices: low ||| high
+    id: 13f829c1-ca58-4efd-b1f0-14446b176871
+    jinja: '{{ text }} Based on that, my rating for this place is ||| {{ answer_choices[label]
+      }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: rating_high_low
+    reference: ''
+  19e426b1-26e6-462e-a556-0ec3b9402e13: !Template
+    answer_choices: Yes ||| No
+    id: 19e426b1-26e6-462e-a556-0ec3b9402e13
+    jinja: '{{ text }} If you ask me whether I regret it, my answer is ||| {{ answer_choices[label]
+      }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: regret_yes_or_no
+    reference: ''
+  363d31a1-6706-47fd-ad7e-d648cf23bbaa: !Template
+    answer_choices: no ||| yes
+    id: 363d31a1-6706-47fd-ad7e-d648cf23bbaa
+    jinja: '{{ text }} If you ask me whether I will come again, my answer is ||| {{
+      answer_choices[label] }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: come_again
+    reference: ''
+  a5ec86fb-046d-482b-a552-8499b8b59b8f: !Template
+    answer_choices: No ||| Yes
+    id: a5ec86fb-046d-482b-a552-8499b8b59b8f
+    jinja: 'Review:
+
+      {{ text }}
+
+
+      Will you come here again? |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: format_come_again
+    reference: Simulating website
+  e0cc8573-5eda-413a-a20f-1da3e7077736: !Template
+    answer_choices: dislike ||| like
+    id: e0cc8573-5eda-413a-a20f-1da3e7077736
+    jinja: '{{ text }} That being said, I ||| {{ answer_choices[label] }} this place.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: like_dislike
+    reference: ''
+  e14da8aa-2995-4aed-a90b-adf2bec2b90e: !Template
+    answer_choices: Bad ||| Good
+    id: e14da8aa-2995-4aed-a90b-adf2bec2b90e
+    jinja: 'Review:
+
+      {{ text }}
+
+
+      Overall rating: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: format_good_bad
+    reference: Simulating the website.
+  e7bb6ec7-921a-4889-93e9-84d957c6035b: !Template
+    answer_choices: bad ||| good
+    id: e7bb6ec7-921a-4889-93e9-84d957c6035b
+    jinja: '{{ text }} In a nutshell, this place is ||| {{ answer_choices[label] }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: true
+    name: place_good_bad
+    reference: ''
+  fb412829-d8a3-4faa-8443-a2ebe6545f6c: !Template
+    answer_choices: no ||| yes
+    id: fb412829-d8a3-4faa-8443-a2ebe6545f6c
+    jinja: '{{ text }} If you ask me whether I like this place, my answer is |||
+      {{ answer_choices[label] }}.'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: false
+    name: like_dislike_2
+    reference: ''
diff --git a/promptsource/templates/yelp_review_full/templates.yaml b/promptsource/templates/yelp_review_full/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ac17c431d507559e9eb2aed1114ac4762be1517
--- /dev/null
+++ b/promptsource/templates/yelp_review_full/templates.yaml
@@ -0,0 +1,109 @@
+dataset: yelp_review_full
+templates:
+  135fcd11-9fcc-4b55-bf1b-9b76290d0f6b: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: 135fcd11-9fcc-4b55-bf1b-9b76290d0f6b
+    jinja: '{{ text }}
+
+      So I would like to give it ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: so_i_would
+    reference: ''
+  27b6bc81-bb1c-467b-91c0-22a4d6a19f44: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: 27b6bc81-bb1c-467b-91c0-22a4d6a19f44
+    jinja: '{{ text }}
+
+      ===
+
+      Based on that, my rating is ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based_on_that
+    reference: ''
+  29fc6386-90b3-4976-b249-26e49fe7c924: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: 29fc6386-90b3-4976-b249-26e49fe7c924
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Stars: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_star
+    reference: simulating webpage
+  2a57af86-e25a-4572-ba9e-aa921842c04b: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: 2a57af86-e25a-4572-ba9e-aa921842c04b
+    jinja: '{{ text }} My rating for this place is ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: this_place
+    reference: ''
+  4dd990b3-7201-4cba-bb9a-baa462d68b1a: !Template
+    answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+    id: 4dd990b3-7201-4cba-bb9a-baa462d68b1a
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Review score (between 1 and 5): |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_score
+    reference: Simulating webpage
+  6d4bfb59-4260-40a5-9da5-e061720bd430: !Template
+    answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+    id: 6d4bfb59-4260-40a5-9da5-e061720bd430
+    jinja: 'Review: {{text}}
+
+      On a scale of 1 to 5, I would give this place ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: on_a_scale
+    reference: ''
+  e8091beb-c0fa-490d-9e0c-32eb6907dbc0: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: e8091beb-c0fa-490d-9e0c-32eb6907dbc0
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Review rating: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_rating
+    reference: It's simulating the format of a webpage.
diff --git a/promptsource/templates/zest/templates.yaml b/promptsource/templates/zest/templates.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7fa523a3299190be48017f46d8360f2d2f20fded
--- /dev/null
+++ b/promptsource/templates/zest/templates.yaml
@@ -0,0 +1,184 @@
+dataset: zest
+templates:
+  223c6226-5f2d-4dd8-9710-4657ffb54f13: !Template
+    answer_choices: null
+    id: 223c6226-5f2d-4dd8-9710-4657ffb54f13
+    jinja: '{{context}}
+
+      {{question}}|||
+
+      {% if answer[0] == "n/a" %}
+
+      I don''t know
+
+      {% else %}
+
+      {{answer[0]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: concatenate
+    reference: Concatenate context and question
+  2283cebf-988e-4bff-96bf-982a09963e49: !Template
+    answer_choices: null
+    id: 2283cebf-988e-4bff-96bf-982a09963e49
+    jinja: 'Decide whether the question "{{question}}" is answerable solely based
+      on this passage: {{context}}|||
+
+      {% if answer[0] == "n/a" %}
+
+      No
+
+      {% else %}
+
+      Yes
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: answerable_or_not
+    reference: Decide whether this question is answerable
+  6f694e45-1d17-4067-a1f6-7dae89c148db: !Template
+    answer_choices: null
+    id: 6f694e45-1d17-4067-a1f6-7dae89c148db
+    jinja: 'My daughter is asking me a question about {{domain | replace("_", " ")}}:
+      {{question}}
+
+      Here''s what I found on the internet: {{context}}
+
+      What''s the answer?
+
+      Answer: |||
+
+      {% if answer[0] == "n/a" %}
+
+      Can''t answer
+
+      {% else %}
+
+      {{answer[0]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: curious_kid
+    reference: Answer the questions of a curious kid
+  7425232a-9880-428c-9ddc-4070e50e22cc: !Template
+    answer_choices: null
+    id: 7425232a-9880-428c-9ddc-4070e50e22cc
+    jinja: 'Answer the question based on the context. If the question is not answerable
+      with the context alone, say "can''t answer".
+
+      {{context}}
+
+      {{question}}|||
+
+      {% if answer[0] == "n/a" %}
+
+      Can''t answer
+
+      {% else %}
+
+      {{answer[0]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: gpt3instruct_format
+    reference: Template format from GPT3 instruct
+  846cc8ff-0527-4b2f-8da4-46613e915ff5: !Template
+    answer_choices: null
+    id: 846cc8ff-0527-4b2f-8da4-46613e915ff5
+    jinja: '{% if answer[0] != "n/a" %}{{context}}
+
+      Based on the previous passage, generate a question which has the following
+      answer: {{answer[0]}}.
+
+      |||
+
+      {{question}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate_the_question
+    reference: Generate the question
+  a69d7845-8503-48c4-b3d2-17bdc6820794: !Template
+    answer_choices: null
+    id: a69d7845-8503-48c4-b3d2-17bdc6820794
+    jinja: '{% if answer[0] != "n/a" %}Generate a question about {{domain | replace("_",
+      " ")}} based on this passage: {{context}}.
+
+      The answer to the question should be: {{answer[0]}}.
+
+      |||
+
+      {{question}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: generate_the_question_with_domain
+    reference: Generate the question with domain
+  bdaf4f8a-2344-4e46-a52b-2045a080a4b2: !Template
+    answer_choices: null
+    id: bdaf4f8a-2344-4e46-a52b-2045a080a4b2
+    jinja: 'Answer this question about {{domain | replace("_", " ")}} based on the
+      context. If the question is not answerable with the context alone, say "can''t
+      answer".
+
+      {{context}}
+
+      {{question}}|||
+
+      {% if answer[0] == "n/a" %}
+
+      Can''t answer
+
+      {% else %}
+
+      {{answer[0]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: gpt3instruct_format_with_domain
+    reference: Template format from GPT3 instruct with the question's domain
+  cd563834-49ee-495d-ac46-99f0264e58d5: !Template
+    answer_choices: null
+    id: cd563834-49ee-495d-ac46-99f0264e58d5
+    jinja: 'I am giving my students the following question "{{question}}" about {{domain
+      | replace("_", " ")}}.
+
+      What should their answer be, based on this context: {{context}}|||
+
+      {% if answer[0] == "n/a" %}
+
+      I don''t know
+
+      {% else %}
+
+      {{answer[0]}}
+
+      {% endif %}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: null
+      metrics: []
+      original_task: null
+    name: teacher_student
+    reference: Teacher-student framing with an "I don't know" fallback when unanswerable
diff --git a/promptsource/utils.py b/promptsource/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ecf3a45bd18d089969c8e6e229a9ef13a63af26
--- /dev/null
+++ b/promptsource/utils.py
@@ -0,0 +1,126 @@
+# coding=utf-8
+
+import datasets
+import requests
+
+from promptsource.templates import INCLUDED_USERS
+
+
+def removeHyphen(example):
+    """Return a copy of `example` whose hyphenated keys use underscores instead."""
+    example_clean = {}
+    for key in example.keys():
+        if "-" in key:
+            new_key = key.replace("-", "_")
+            example_clean[new_key] = example[key]
+        else:
+            example_clean[key] = example[key]
+    return example_clean
+
+
+def renameDatasetColumn(dataset):
+    """Rename dataset columns so that hyphens become underscores."""
+    col_names = dataset.column_names
+    for col in col_names:
+        if "-" in col:
+            dataset = dataset.rename_column(col, col.replace("-", "_"))
+    return dataset
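+
+
+# A minimal usage sketch of the two helpers above (hypothetical field names;
+# hyphenated keys cannot be referenced as identifiers inside Jinja templates):
+#
+#   example = {"answer-text": "42", "id": 7}
+#   removeHyphen(example)                   # -> {"answer_text": "42", "id": 7}
+#   dataset = renameDatasetColumn(dataset)  # same rule applied to column names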
+
+
+#
+# Helper functions for datasets library
+#
+
+
+def get_dataset_builder(path, conf=None):
+    "Get a dataset builder from name and conf."
+    module_path = datasets.load.prepare_module(path, dataset=True)
+    builder_cls = datasets.load.import_main_class(module_path[0], dataset=True)
+    if conf:
+        builder_instance = builder_cls(name=conf, cache_dir=None, hash=module_path[1])
+    else:
+        builder_instance = builder_cls(cache_dir=None, hash=module_path[1])
+    return builder_instance
+
+
+def get_dataset(path, conf=None):
+    "Get a dataset from name and conf."
+    builder_instance = get_dataset_builder(path, conf)
+    if builder_instance.manual_download_instructions is None and builder_instance.info.size_in_bytes is not None:
+        builder_instance.download_and_prepare()
+        return builder_instance.as_dataset()
+    else:
+        return datasets.load_dataset(path, conf)
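+
+
+# A minimal usage sketch, with "super_glue"/"boolq" as stand-ins for any
+# dataset/config pair known to the `datasets` library:
+#
+#   dataset = get_dataset("super_glue", "boolq")
+#   print(dataset["train"][0])  # the returned object is indexed by split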
+
+
+def get_dataset_confs(path):
+    "Get the list of configurations for a dataset, or [] if there is at most one."
+    module_path = datasets.load.prepare_module(path, dataset=True)
+    # Get dataset builder class from the processing script
+    builder_cls = datasets.load.import_main_class(module_path[0], dataset=True)
+    # Read the configurations declared on the builder class (no instantiation needed)
+    confs = builder_cls.BUILDER_CONFIGS
+    if confs and len(confs) > 1:
+        return confs
+    return []
+
+
+def render_features(features):
+    """Recursively render the dataset schema (i.e. the fields)."""
+    if isinstance(features, dict):
+        return {k: render_features(v) for k, v in features.items()}
+    if isinstance(features, datasets.features.ClassLabel):
+        return features.names
+    if isinstance(features, datasets.features.Value):
+        return features.dtype
+    if isinstance(features, datasets.features.Sequence):
+        return {"[]": render_features(features.feature)}
+    return features
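+
+
+# For example, a SQuAD-style schema (hypothetical) renders to plain Python:
+#
+#   from datasets import Features, Sequence, Value
+#   features = Features({"context": Value("string"),
+#                        "answers": Sequence({"text": Value("string")})})
+#   render_features(features)
+#   # -> {"context": "string", "answers": {"[]": {"text": "string"}}}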
+
+
+#
+# Loads dataset information
+#
+
+
+def filter_english_datasets():
+    """
+    Filter English datasets based on language tags in metadata.
+
+    Also includes the datasets of any users listed in INCLUDED_USERS
+    """
+    english_datasets = []
+
+    response = requests.get("https://huggingface.co/api/datasets?full=true")
+    tags = response.json()
+
+    for dataset in tags:
+        dataset_name = dataset["id"]
+
+        is_community_dataset = "/" in dataset_name
+        if is_community_dataset:
+            user = dataset_name.split("/")[0]
+            if user in INCLUDED_USERS:
+                english_datasets.append(dataset_name)
+            continue
+
+        if "card_data" not in dataset:
+            continue
+        metadata = dataset["card_data"]
+
+        if "languages" not in metadata:
+            continue
+        languages = metadata["languages"]
+
+        if "en" in languages or "en-US" in languages:
+            english_datasets.append(dataset_name)
+
+    return sorted(english_datasets)
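+
+
+# Note: on success this returns a sorted list of canonical dataset names plus
+# any whitelisted community datasets, e.g. (illustrative only):
+#   ["ag_news", "anli", ..., "xsum", "yelp_polarity"]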
+
+
+def list_datasets(template_collection, _state):
+    """Get all the datasets to work with."""
+    dataset_list = filter_english_datasets()
+    # Re-sort case-insensitively so community names interleave with canonical ones.
+    dataset_list.sort(key=lambda x: x.lower())
+    return dataset_list
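+
+
+# End-to-end sketch (assumes network access; arguments to list_datasets are
+# unused by the current implementation, so None placeholders suffice):
+#
+#   for name in list_datasets(None, None)[:3]:
+#       confs = get_dataset_confs(name)
+#       print(name, [c.name for c in confs])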
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..29044fdfc269550baacddd604b9b290a6f7324e4
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,18 @@
+black
+datasets>=1.7.0
+flake8
+isort==5.8.0
+pytest
+pyyaml>=5
+streamlit==0.82
+jinja2
+plotly
+requests
+pandas
+##############################################################
+# Dependencies in this section are added for specific datasets
+##############################################################
+py7zr
+##############################################################
+# End of dataset-specific dependencies
+##############################################################
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..7d20505c0dafb3a7496affc0dd0584c8c20a793d
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,13 @@
+[isort]
+ensure_newline_before_comments = True
+force_grid_wrap = 0
+include_trailing_comma = True
+line_length = 119
+lines_after_imports = 2
+multi_line_output = 3
+use_parentheses = True
+
+
+[flake8]
+ignore = E203, E501, W503
+max-line-length = 119
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6d1150e4c3399d21349c661f0e7e63693acc859
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,32 @@
+from setuptools import setup, find_packages
+
+with open('README.md') as readme_file:
+    readme = readme_file.read()
+
+setup(
+    name='promptsource',
+    version='0.1.0',
+    url='https://github.com/bigscience-workshop/promptsource.git',
+    author='Multiple Authors',
+    author_email='xxx',
+    python_requires='>=3.7, <3.8',
+    classifiers=[
+        'Development Status :: 2 - Pre-Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: Apache Software License',
+        'Natural Language :: English',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.7',
+    ],
+    description='Toolkit for collecting and applying templates of prompting instances.',
+    packages=find_packages(),
+    license="Apache Software License 2.0",
+    long_description=readme,
+    package_data={"": [
+        "templates/*/*.yaml",
+        "templates/*/*/*.yaml",
+        "seqio_tasks/experiment_D3.csv",  # Experiment D3
+        "seqio_tasks/experiment_D4.csv",
+        "custom_datasets/*/*"
+    ]}
+)