Zhangir Azerbayev committed
Commit ab0d7f5 · 1 Parent(s): 4ca8ed0
Files changed (2):
  1. proof-pile.py +18 -30
  2. test.py +1 -1
proof-pile.py CHANGED
@@ -143,13 +143,11 @@ class ProofPile(datasets.GeneratorBasedBuilder):
         train_paths = ["wiki/proofwiki.tar.gz", "wiki/wikipedia.tar.gz"]
         val_paths = ["wiki/proofwiki_val.tar.gz"]
 
-        train_files = itertools.chain.from_iterable(dl_manager.iter_archive(x) for x in train_paths)
-        val_files = itertools.chain.from_iterable(dl_manager.iter_archive(x) for x in val_paths)
-
-        if self.config.name=="math-dataset":
-            train_files = dl_manager.download_and_extract("math-dataset/train.tar.gz")
-            val_files = dl_manager.download_and_extract("math-datset/val.tar.gz")
-
+        train_files = itertools.chain.from_iterable(dl_manager.iter_files(dl_manager.download_and_extract(x))
+            for x in train_paths)
+        val_files = itertools.chain.from_iterable(dl_manager.iter_files(dl_manager.download_and_extract(x))
+            for x in val_paths)
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
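This hunk replaces `dl_manager.iter_archive`, which streams members straight out of a tarball, with `download_and_extract` plus `iter_files`, which unpacks each archive and yields ordinary local paths; it also retires the `math-dataset` special case. A minimal sketch of the two iteration shapes, assuming a `dl_manager` as passed to `_split_generators` (the archive path mirrors the removed and added lines):

    # Old shape, as in the removed lines: iter_archive yields
    # (member_name, file_object) pairs from the tarball, so consumers
    # receive file objects rather than paths.
    for name, fobj in dl_manager.iter_archive("wiki/proofwiki.tar.gz"):
        text = fobj.read()

    # New shape, as in the added lines: download_and_extract unpacks the
    # archive into the cache and returns the extraction directory;
    # iter_files then yields plain local file paths.
    extracted = dl_manager.download_and_extract("wiki/proofwiki.tar.gz")
    for path in dl_manager.iter_files(extracted):
        with open(path, encoding="utf-8") as f:
            text = f.read()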
@@ -167,7 +165,7 @@ class ProofPile(datasets.GeneratorBasedBuilder):
             ),
         ]
         else:
-            with open("splits.json") as f:
+            with open(dl_manager.download("splits.json")) as f:
                 splits = json.load(f)
 
             return [
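`dl_manager.download` maps a repo-relative name to a locally cached file, so opening `splits.json` also works when the loading script is executed from the Hugging Face cache rather than a local checkout. A minimal sketch under that assumption:

    # dl_manager.download resolves "splits.json" relative to the dataset
    # repo and returns a local cached path that open() can read.
    local_path = dl_manager.download("splits.json")
    with open(local_path, encoding="utf-8") as f:
        splits = json.load(f)  # keys like "<config>-train" / "<config>-valid"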
@@ -175,14 +173,14 @@ class ProofPile(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "data_files": splits[self.config.name + "-train"],
+                    "data_files": [dl_manager.download(x) for x in splits[self.config.name + "-train"]],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "data_files": splits[self.config.name + "-valid"],
+                    "data_files": [dl_manager.download(x) for x in splits[self.config.name + "-valid"]],
                 },
             ),
         ]
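The entries in `splits` are repo-relative file names; mapping `dl_manager.download` over each list converts them to local paths before they reach `_generate_examples`, which is what lets the generator below collapse to a single `open()` loop. A small sketch with a hypothetical splits entry (the file names are invented for illustration):

    # Hypothetical entry; the real lists live in splits.json.
    splits = {"arxiv-train": ["arxiv/0001.txt", "arxiv/0002.txt"]}

    # Repo-relative names -> locally cached, openable paths, resolved
    # once at split-generation time.
    data_files = [dl_manager.download(x) for x in splits["arxiv-train"]]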
@@ -192,23 +190,13 @@ class ProofPile(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         key = 0
-        if self.config.name in self.archived_configs:
-            for name, obj in data_files:
-                text = obj.read()
-                yield key, {
-                    "config": self.config.name,
-                    "file": name,
-                    "text": text,
-                }
-                key += 1
-        else:
-            for name in data_files:
-                with open(name, encoding="utf-8") as f:
-                    text = f.read()
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "config": self.config.name,
-                        "file": name,
-                        "text": text,
-                    }
-                    key += 1
+        for name in data_files:
+            with open(name, encoding="utf-8") as f:
+                text = f.read()
+                # Yields examples as (key, example) tuples
+                yield key, {
+                    "config": self.config.name,
+                    "file": name,
+                    "text": text,
+                }
+                key += 1
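With both branches of `_split_generators` now handing over plain local paths, the `archived_configs` branch is gone and every config flows through the same file-reading loop. A usage sketch (the config name is taken from test.py), showing the example schema the generator yields:

    from datasets import load_dataset

    # Every config loads the same way and yields {"config", "file", "text"}.
    dataset = load_dataset("./proof-pile.py", "wiki")
    example = dataset["train"][0]
    print(example["file"], len(example["text"]))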
 
test.py CHANGED
@@ -5,7 +5,7 @@ import time
 from tqdm import tqdm
 
 total_size = 0
-for x in ["arxiv", "books", "formal", "stack-exchange", "wiki", "math-dataset"]:
+for x in ["arxiv", "books", "formal", "wiki", "stack-exchange", "math-dataset"]:
     dataset = load_dataset("./proof-pile.py", x)
     size = dataset["train"].dataset_size / 2**30
     total_size += size
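This hunk only reorders the config list. For reference, a self-contained version of the visible loop; the `load_dataset` import and the final report are assumptions, not shown in the diff:

    import time
    from datasets import load_dataset  # assumed; not visible in the hunk
    from tqdm import tqdm

    total_size = 0
    for x in ["arxiv", "books", "formal", "wiki", "stack-exchange", "math-dataset"]:
        dataset = load_dataset("./proof-pile.py", x)
        size = dataset["train"].dataset_size / 2**30  # bytes -> GiB
        total_size += size
    print(f"total train size: {total_size:.2f} GiB")  # assumed reporting line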