Commit 32aa588 (0 parents) committed by zhuwq0

add scripts
Files changed (4)
  1. example.py +54 -0
  2. merge_hdf5.py +65 -0
  3. quakeflow_nc.py +362 -0
  4. upload.py +11 -0
example.py ADDED
@@ -0,0 +1,54 @@
# %%
import datasets
import numpy as np
from torch.utils.data import DataLoader

quakeflow_nc = datasets.load_dataset(
    "AI4EPS/quakeflow_nc",
    name="station",
    split="train",
    # name="station_test",
    # split="test",
    # download_mode="force_redownload",
    trust_remote_code=True,
    num_proc=36,
)
# quakeflow_nc = datasets.load_dataset(
#     "./quakeflow_nc.py",
#     name="station",
#     split="train",
#     # name="station_test",
#     # split="test",
#     num_proc=36,
# )

print(quakeflow_nc)

# print the first sample of the dataset
for example in quakeflow_nc:
    print("\nIterable dataset\n")
    print(example)
    print(example.keys())
    for key in example.keys():
        if key == "waveform":
            print(key, np.array(example[key]).shape)
        else:
            print(key, example[key])
    break

# %%
quakeflow_nc = quakeflow_nc.with_format("torch")
dataloader = DataLoader(quakeflow_nc, batch_size=8, num_workers=0, collate_fn=lambda x: x)

for batch in dataloader:
    print("\nDataloader dataset\n")
    print(f"Batch size: {len(batch)}")
    print(batch[0].keys())
    for key in batch[0].keys():
        if key == "waveform":
            print(key, np.array(batch[0][key]).shape)
        else:
            print(key, batch[0][key])
    break

# %%
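
A small follow-on sketch (not part of the committed example.py): slicing a window around each pick in the first station-based sample, assuming phase_index holds sample indices into the 3 x 8192 waveform array and an NCEDC-typical 100 Hz sampling rate.

import numpy as np

sample = quakeflow_nc[0]  # first station-based sample, reusing the dataset loaded above
waveform = np.asarray(sample["waveform"])  # shape (3, 8192): three components
for phase, idx in zip(sample["phase_type"], sample["phase_index"]):
    idx = int(idx)
    start, end = max(0, idx - 500), min(waveform.shape[1], idx + 500)  # ~10 s window at an assumed 100 Hz
    print(phase, "pick at sample", idx, "window shape", waveform[:, start:end].shape)
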
merge_hdf5.py ADDED
@@ -0,0 +1,65 @@
# %%
import os

import h5py
import matplotlib.pyplot as plt
from tqdm import tqdm

# %%
h5_dir = "waveform_h5"
h5_out = "waveform.h5"
h5_train = "waveform_train.h5"
h5_test = "waveform_test.h5"

# # %%
# h5_dir = "waveform_h5"
# h5_out = "waveform.h5"
# h5_train = "waveform_train.h5"
# h5_test = "waveform_test.h5"

h5_files = sorted(os.listdir(h5_dir))
train_files = h5_files[:-1]
test_files = h5_files[-1:]
# train_files = h5_files
# train_files = [x for x in train_files if (x != "2014.h5") and (x not in [])]
# test_files = []
print(f"train files: {train_files}")
print(f"test files: {test_files}")

# %%
with h5py.File(h5_out, "w") as fp:
    # link every event in all yearly files into one merged file via external links
    for h5_file in h5_files:
        with h5py.File(os.path.join(h5_dir, h5_file), "r") as f:
            for event in tqdm(f.keys(), desc=h5_file, total=len(f.keys())):
                if event not in fp:
                    fp[event] = h5py.ExternalLink(os.path.join(h5_dir, h5_file), event)
                else:
                    print(f"{event} already exists")
                    continue

# %%
with h5py.File(h5_train, "w") as fp:
    # link events from the training-year files only
    for h5_file in train_files:
        with h5py.File(os.path.join(h5_dir, h5_file), "r") as f:
            for event in tqdm(f.keys(), desc=h5_file, total=len(f.keys())):
                if event not in fp:
                    fp[event] = h5py.ExternalLink(os.path.join(h5_dir, h5_file), event)
                else:
                    print(f"{event} already exists")
                    continue

# %%
with h5py.File(h5_test, "w") as fp:
    # link events from the test-year file only
    for h5_file in test_files:
        with h5py.File(os.path.join(h5_dir, h5_file), "r") as f:
            for event in tqdm(f.keys(), desc=h5_file, total=len(f.keys())):
                if event not in fp:
                    fp[event] = h5py.ExternalLink(os.path.join(h5_dir, h5_file), event)
                else:
                    print(f"{event} already exists")
                    continue

# %%
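
After merging, a quick sanity check can confirm that the external links resolve (a sketch, not part of this commit): h5py follows each ExternalLink into the yearly files under waveform_h5/, as long as the merged file is opened from the same directory layout used when the links were created.

import h5py

with h5py.File("waveform.h5", "r") as fp:
    event_ids = list(fp.keys())
    print(f"{len(event_ids)} linked events")
    event = fp[event_ids[0]]  # the external link is resolved transparently on access
    print("event attrs:", dict(event.attrs))
    for station_id in list(event.keys())[:3]:
        print(station_id, event[station_id].shape)
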
quakeflow_nc.py ADDED
@@ -0,0 +1,362 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: Address all TODOs and remove all explanatory comments
# Lint as: python3
"""QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""


from typing import Dict, List, Optional, Tuple, Union

import datasets
import fsspec
import h5py
import numpy as np
import torch

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {NCEDC dataset for QuakeFlow},
author={Zhu et al.},
year={2023}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_REPO = "https://huggingface.co/datasets/AI4EPS/quakeflow_nc/resolve/main/waveform_h5"
_FILES = [
    "1987.h5",
    "1988.h5",
    "1989.h5",
    "1990.h5",
    "1991.h5",
    "1992.h5",
    "1993.h5",
    "1994.h5",
    "1995.h5",
    "1996.h5",
    "1997.h5",
    "1998.h5",
    "1999.h5",
    "2000.h5",
    "2001.h5",
    "2002.h5",
    "2003.h5",
    "2004.h5",
    "2005.h5",
    "2006.h5",
    "2007.h5",
    "2008.h5",
    "2009.h5",
    "2010.h5",
    "2011.h5",
    "2012.h5",
    "2013.h5",
    "2014.h5",
    "2015.h5",
    "2016.h5",
    "2017.h5",
    "2018.h5",
    "2019.h5",
    "2020.h5",
    "2021.h5",
    "2022.h5",
    "2023.h5",
]
_URLS = {
    "station": [f"{_REPO}/{x}" for x in _FILES],
    "event": [f"{_REPO}/{x}" for x in _FILES],
    "station_train": [f"{_REPO}/{x}" for x in _FILES[:-1]],
    "event_train": [f"{_REPO}/{x}" for x in _FILES[:-1]],
    "station_test": [f"{_REPO}/{x}" for x in _FILES[-1:]],
    "event_test": [f"{_REPO}/{x}" for x in _FILES[-1:]],
}


class BatchBuilderConfig(datasets.BuilderConfig):
    """
    yield a batch of event-based samples, so the number of stations can vary among batches
    Batch Config for QuakeFlow_NC
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class QuakeFlow_NC(datasets.GeneratorBasedBuilder):
    """QuakeFlow_NC: A dataset of earthquake waveforms organized by earthquake events and based on the HDF5 format."""

    VERSION = datasets.Version("1.1.0")

    nt = 8192

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')

    # default config, you can change batch_size and num_stations_list when using `datasets.load_dataset`
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="station", version=VERSION, description="yield station-based samples one by one of whole dataset"
        ),
        datasets.BuilderConfig(
            name="event", version=VERSION, description="yield event-based samples one by one of whole dataset"
        ),
        datasets.BuilderConfig(
            name="station_train",
            version=VERSION,
            description="yield station-based samples one by one of training dataset",
        ),
        datasets.BuilderConfig(
            name="event_train", version=VERSION, description="yield event-based samples one by one of training dataset"
        ),
        datasets.BuilderConfig(
            name="station_test", version=VERSION, description="yield station-based samples one by one of test dataset"
        ),
        datasets.BuilderConfig(
            name="event_test", version=VERSION, description="yield event-based samples one by one of test dataset"
        ),
    ]

    DEFAULT_CONFIG_NAME = (
        "station_test"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    )

    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
        if (
            (self.config.name == "station")
            or (self.config.name == "station_train")
            or (self.config.name == "station_test")
        ):
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "event_id": datasets.Value("string"),
                    "station_id": datasets.Value("string"),
                    "waveform": datasets.Array2D(shape=(3, self.nt), dtype="float32"),
                    "phase_time": datasets.Sequence(datasets.Value("string")),
                    "phase_index": datasets.Sequence(datasets.Value("int32")),
                    "phase_type": datasets.Sequence(datasets.Value("string")),
                    "phase_polarity": datasets.Sequence(datasets.Value("string")),
                    "begin_time": datasets.Value("string"),
                    "end_time": datasets.Value("string"),
                    "event_time": datasets.Value("string"),
                    "event_time_index": datasets.Value("int32"),
                    "event_location": datasets.Sequence(datasets.Value("float32")),
                    "station_location": datasets.Sequence(datasets.Value("float32")),
                },
            )
        elif (self.config.name == "event") or (self.config.name == "event_train") or (self.config.name == "event_test"):
            features = datasets.Features(
                {
                    "event_id": datasets.Value("string"),
                    "waveform": datasets.Array3D(shape=(None, 3, self.nt), dtype="float32"),
                    "phase_time": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "phase_index": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
                    "phase_type": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "phase_polarity": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "begin_time": datasets.Value("string"),
                    "end_time": datasets.Value("string"),
                    "event_time": datasets.Value("string"),
                    "event_time_index": datasets.Value("int32"),
                    "event_location": datasets.Sequence(datasets.Value("float32")),
                    "station_location": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                },
            )
        else:
            raise ValueError(f"config.name = {self.config.name} is not in BUILDER_CONFIGS")

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        urls = _URLS[self.config.name]
        # files = dl_manager.download(urls)
        if "bucket" not in self.storage_options:
            files = dl_manager.download_and_extract(urls)
        else:
            files = [f"{self.storage_options['bucket']}/{x}" for x in _FILES]
            # files = [f"/nfs/quakeflow_dataset/NC/quakeflow_nc/waveform_h5/{x}" for x in _FILES][-3:]
        print("Files:\n", "\n".join(sorted(files)))
        print(self.storage_options)

        if self.config.name == "station" or self.config.name == "event":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={"filepath": files[:-1], "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": files[-1:], "split": "test"},
                ),
            ]
        elif self.config.name == "station_train" or self.config.name == "event_train":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": files, "split": "train"},
                ),
            ]
        elif self.config.name == "station_test" or self.config.name == "event_test":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": files, "split": "test"},
                ),
            ]
        else:
            raise ValueError("config.name is not in BUILDER_CONFIGS")

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

        for file in filepath:
            print(f"\nReading {file}")
            with fsspec.open(file, "rb") as fs:
                with h5py.File(fs, "r") as fp:
                    event_ids = list(fp.keys())
                    for event_id in event_ids:
                        event = fp[event_id]
                        event_attrs = event.attrs
                        begin_time = event_attrs["begin_time"]
                        end_time = event_attrs["end_time"]
                        event_location = [
                            event_attrs["longitude"],
                            event_attrs["latitude"],
                            event_attrs["depth_km"],
                        ]
                        event_time = event_attrs["event_time"]
                        event_time_index = event_attrs["event_time_index"]
                        station_ids = list(event.keys())
                        if len(station_ids) == 0:
                            continue
                        if (
                            (self.config.name == "station")
                            or (self.config.name == "station_train")
                            or (self.config.name == "station_test")
                        ):
                            waveform = np.zeros([3, self.nt], dtype="float32")

                            for i, station_id in enumerate(station_ids):
                                waveform[:, : self.nt] = event[station_id][:, : self.nt]
                                attrs = event[station_id].attrs
                                phase_type = attrs["phase_type"]
                                phase_time = attrs["phase_time"]
                                phase_index = attrs["phase_index"]
                                phase_polarity = attrs["phase_polarity"]
                                station_location = [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]

                                yield f"{event_id}/{station_id}", {
                                    "id": f"{event_id}/{station_id}",
                                    "event_id": event_id,
                                    "station_id": station_id,
                                    "waveform": waveform,
                                    "phase_time": phase_time,
                                    "phase_index": phase_index,
                                    "phase_type": phase_type,
                                    "phase_polarity": phase_polarity,
                                    "begin_time": begin_time,
                                    "end_time": end_time,
                                    "event_time": event_time,
                                    "event_time_index": event_time_index,
                                    "event_location": event_location,
                                    "station_location": station_location,
                                }

                        elif (
                            (self.config.name == "event")
                            or (self.config.name == "event_train")
                            or (self.config.name == "event_test")
                        ):

                            waveform = np.zeros([len(station_ids), 3, self.nt], dtype="float32")
                            phase_type = []
                            phase_time = []
                            phase_index = []
                            phase_polarity = []
                            station_location = []

                            for i, station_id in enumerate(station_ids):
                                waveform[i, :, : self.nt] = event[station_id][:, : self.nt]
                                attrs = event[station_id].attrs
                                phase_type.append(list(attrs["phase_type"]))
                                phase_time.append(list(attrs["phase_time"]))
                                phase_index.append(list(attrs["phase_index"]))
                                phase_polarity.append(list(attrs["phase_polarity"]))
                                station_location.append(
                                    [attrs["longitude"], attrs["latitude"], -attrs["elevation_m"] / 1e3]
                                )
                            yield event_id, {
                                "event_id": event_id,
                                "waveform": waveform,
                                "phase_time": phase_time,
                                "phase_index": phase_index,
                                "phase_type": phase_type,
                                "phase_polarity": phase_polarity,
                                "begin_time": begin_time,
                                "end_time": end_time,
                                "event_time": event_time,
                                "event_time_index": event_time_index,
                                "event_location": event_location,
                                "station_location": station_location,
                            }
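
The event-based configs yield one example per earthquake with a variable-length station axis (Array3D with a None first dimension), so batching them needs a custom collate function. Below is a minimal sketch, assuming the repo and feature layout defined above; pad_collate and its padding scheme are illustrative, not part of the dataset script.

import datasets
import torch
from torch.utils.data import DataLoader

event_ds = datasets.load_dataset(
    "AI4EPS/quakeflow_nc", name="event_test", split="test", trust_remote_code=True
).with_format("torch")

def pad_collate(batch):
    # Pad the variable-length station axis of "waveform" to the batch maximum;
    # other fields are returned as plain lists.
    max_sta = max(item["waveform"].shape[0] for item in batch)
    nt = batch[0]["waveform"].shape[-1]
    waveforms = torch.zeros(len(batch), max_sta, 3, nt)
    for i, item in enumerate(batch):
        waveforms[i, : item["waveform"].shape[0]] = item["waveform"]
    return {"waveform": waveforms, "event_id": [item["event_id"] for item in batch]}

loader = DataLoader(event_ds, batch_size=4, collate_fn=pad_collate)
batch = next(iter(loader))
print(batch["waveform"].shape)  # (4, max_stations_in_batch, 3, 8192)
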
upload.py ADDED
@@ -0,0 +1,11 @@
from huggingface_hub import HfApi

api = HfApi()

# Upload all the content from the local folder to the dataset repo.
# By default, files are uploaded at the root of the repo
api.upload_folder(
    folder_path="./",
    repo_id="AI4EPS/quakeflow_nc",
    repo_type="dataset",
)
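
If only part of the folder needs to be pushed, upload_folder also accepts allow_patterns/ignore_patterns filters. A hedged sketch, assuming the waveform_h5/ layout from merge_hdf5.py and that only the yearly HDF5 files should be refreshed:

from huggingface_hub import HfApi

api = HfApi()
# Upload only the yearly HDF5 files, skipping scripts, merged link files, and caches.
api.upload_folder(
    folder_path="./",
    repo_id="AI4EPS/quakeflow_nc",
    repo_type="dataset",
    allow_patterns=["waveform_h5/*.h5"],
)
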