rishabbala committed
Commit 4da34e9 · verified · 1 parent: 38e0ba1

Delete loading script

Files changed (1):
  1. math_qa.py (+0, -84)
math_qa.py DELETED
@@ -1,84 +0,0 @@
-"""TODO(math_qa): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(math_qa): BibTeX citation
-_CITATION = """
-"""
-
-# TODO(math_qa):
-_DESCRIPTION = """
-Our dataset is gathered by using a new representation language to annotate over the AQuA-RAT dataset. AQuA-RAT has provided the questions, options, rationale, and the correct options.
-"""
-_URL = "https://math-qa.github.io/math-QA/data/MathQA.zip"
-
-
-class MathQa(datasets.GeneratorBasedBuilder):
-    """TODO(math_qa): Short description of my dataset."""
-
-    # TODO(math_qa): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(math_qa): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "Problem": datasets.Value("string"),
-                    "Rationale": datasets.Value("string"),
-                    "options": datasets.Value("string"),
-                    "correct": datasets.Value("string"),
-                    "annotated_formula": datasets.Value("string"),
-                    "linear_formula": datasets.Value("string"),
-                    "category": datasets.Value("string"),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://math-qa.github.io/math-QA/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(math_qa): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_path = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(dl_path, "train.json")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(dl_path, "test.json")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(dl_path, "dev.json")},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(math_qa): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for id_, row in enumerate(data):
-                yield id_, row
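
With this builder script removed, the Hub no longer constructs the dataset from MathQA.zip on the fly, and loading it should no longer require executing repository code. The following is a minimal sketch of how a consumer would load the dataset after this commit, assuming the repository id shown here and that the data files are now hosted directly on the Hub (e.g. as auto-converted Parquet); neither assumption is stated in the commit itself.

# Sketch only: the repo id "rishabbala/math_qa" and the presence of hosted
# data files are assumptions, not facts recorded in this commit.
from datasets import load_dataset

ds = load_dataset("rishabbala/math_qa")

# The splits and string columns mirror the deleted builder:
# train/validation/test with "Problem", "Rationale", "options", "correct",
# "annotated_formula", "linear_formula", and "category".
print(ds)
print(ds["train"][0]["Problem"])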