helenqu committed on
Commit
306a138
·
1 Parent(s): 912bfe8
.gitattributes CHANGED
@@ -53,3 +53,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ plasticc_train_augmented_4.jsonl filter=lfs diff=lfs merge=lfs -text
57
+ plasticc_train_augmented_5.jsonl filter=lfs diff=lfs merge=lfs -text
58
+ plasticc_train_augmented_6.jsonl filter=lfs diff=lfs merge=lfs -text
59
+ plasticc_train_augmented_8.jsonl filter=lfs diff=lfs merge=lfs -text
60
+ plasticc_train_augmented_2.jsonl filter=lfs diff=lfs merge=lfs -text
61
+ plasticc_train_augmented_1.jsonl filter=lfs diff=lfs merge=lfs -text
62
+ plasticc_train_augmented_3.jsonl filter=lfs diff=lfs merge=lfs -text
63
+ plasticc_train_augmented_7.jsonl filter=lfs diff=lfs merge=lfs -text
64
+ plasticc_train_augmented_9.jsonl filter=lfs diff=lfs merge=lfs -text
65
+ train.jsonl filter=lfs diff=lfs merge=lfs -text
66
+ val.jsonl filter=lfs diff=lfs merge=lfs -text
67
+ plasticc_train_augmented_0.jsonl filter=lfs diff=lfs merge=lfs -text
plasticc_train_augmented_0.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00927ace4c4c221e9f88cff6b897cc0b81f8fb5db954c4ff4510fc738a34c08d
3
+ size 19034864
plasticc_train_augmented_1.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d068dcba34ab478f25987e95c69f643019b38c9bc1056ad79f3e75f9802d10c6
3
+ size 860899559
plasticc_train_augmented_2.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de29275cac4393eadf6cdcb03fbf340a9a7de5f009a73ed23630c2a8e7bc2d28
3
+ size 828566728
plasticc_train_augmented_3.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6578db232c69171d8bc63ad03079c948ebcf6c95331f4f57a053c41042c32a6e
3
+ size 760126858
plasticc_train_augmented_4.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3837873be6ce76d81f2dd3cfb3c6dd24f3c611062d97af731fa30e196d1d7747
3
+ size 754818230
plasticc_train_augmented_5.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06eb167b47e2903e7df2b739a04f8cd31e3f7c59696ee044075bdc8774cee864
3
+ size 764785764
plasticc_train_augmented_6.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e7ba3baea3691d92171226b655eed03067e036d2ccb0526d4f6dab4c6859c1a
3
+ size 759301257
plasticc_train_augmented_7.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:422e10308af0aeb34c654378c6cfc0b90272ab30fd3e498291d98376cf1626ea
3
+ size 757834113
plasticc_train_augmented_8.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb02dfad3698c6f23ae49fa6db3e429c181fb4fea43e87183280183a8477c477
3
+ size 761242767
plasticc_train_augmented_9.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:637b692a6d1a7e218d0e57690f445b9f970e5ceca86f47699273ace12a6a835d
3
+ size 747819889
train_augmented_dataset.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """TODO: Add a description here."""
16
+
17
+ import jsonlines
18
+ import pandas as pd
19
+ from pathlib import Path
20
+ from connect_later.split_dataset_into_files import split_augmented_jsonl_dataset
21
+ from connect_later.constants import PLASTICC_CLASS_MAPPING, INT_LABELS
22
+
23
+ import datasets
24
+ import pdb
25
+
26
+ RAW_DATA_PATH = "/pscratch/sd/h/helenqu/plasticc/raw"
27
+ DATASET_PATH = "/pscratch/sd/h/helenqu/plasticc/train_augmented_dataset"
28
+ ORIG_DATASET_PATH = "/pscratch/sd/h/helenqu/plasticc/raw_train_with_labels"
29
+
30
+ # PLASTICC_CLASS_MAPPING = {
31
+ # 90: "SNIa",
32
+ # 67: "SNIa-91bg",
33
+ # 52: "SNIax",
34
+ # 42: "SNII",
35
+ # 62: "SNIbc",
36
+ # 95: "SLSN-I",
37
+ # 15: "TDE",
38
+ # 64: "KN",
39
+ # 88: "AGN",
40
+ # 92: "RRL",
41
+ # 65: "M-dwarf",
42
+ # 16: "EB",
43
+ # 53: "Mira",
44
+ # 6: "$\mu$-Lens-Single",
45
+ # }
46
+ # INT_LABELS = sorted(PLASTICC_CLASS_MAPPING.keys())
47
+
48
+ # TODO: Add BibTeX citation
49
+ # Find for instance the citation on arxiv or on the dataset repo/website
50
+ _CITATION = """\
51
+ @InProceedings{huggingface:dataset,
52
+ title = {A great new dataset},
53
+ author={huggingface, Inc.
54
+ },
55
+ year={2020}
56
+ }
57
+ """
58
+
59
+ # TODO: Add description of the dataset here
60
+ # You can copy an official description
61
+ _DESCRIPTION = """\
62
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
63
+ """
64
+
65
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
66
class NewDataset(datasets.GeneratorBasedBuilder):
    """PLAsTiCC augmented training set: light curves with class labels and redshifts.

    Each example is one (possibly augmentation-derived) light curve; labels and
    redshifts come from the original PLAsTiCC training metadata.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo declaring the feature schema for one example.

        `times_wv` rows are [time, central wavelength of band]; `target` rows are
        [flux, flux_err]; both are fixed-shape (300, 2) arrays. `label` is the
        index of the object's true_target within sorted INT_LABELS.
        """
        features = datasets.Features(
            {
                "objid": datasets.Value("string"),
                "times_wv": datasets.Array2D(shape=(300, 2), dtype='float64'),  # ith row is [time, central wv of band]
                "target": datasets.Array2D(shape=(300, 2), dtype='float64'),  # the time series data, ith row is [flux, flux_err]
                "label": datasets.ClassLabel(
                    num_classes=len(PLASTICC_CLASS_MAPPING),
                    names=[PLASTICC_CLASS_MAPPING[int_label] for int_label in INT_LABELS]
                ),
                "redshift": datasets.Value("float32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Split the augmented shards into train/val files (first run only) and return both split generators."""
        dataset_path = Path(DATASET_PATH)
        # The presence of train.jsonl marks the train/val split as already done.
        if not (dataset_path / 'train.jsonl').exists():
            print('Splitting dataset into files...')
            split_augmented_jsonl_dataset(DATASET_PATH, Path(ORIG_DATASET_PATH) / "plasticc_train_lightcurves.csv.jsonl", "*.jsonl", 0.8)
        print(f"int index to label mapping: {INT_LABELS}")
        print(f"label to class name mapping: {PLASTICC_CLASS_MAPPING}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": dataset_path / "train.jsonl",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": dataset_path / "val.jsonl",
                    "split": "dev",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yield (object_id, example) pairs from one jsonl split file.

        The key is the raw (possibly augmented) object_id string, which is unique
        per example; labels/redshifts are looked up by the base plasticc id.
        """
        metadata = pd.read_csv(Path(RAW_DATA_PATH) / 'plasticc_train_metadata.csv.gz')
        # Index the metadata once so each per-object lookup is O(1) instead of a
        # full-column scan per example. drop_duplicates keeps the first row per
        # id, matching the original `.values[0]` selection.
        metadata = metadata.drop_duplicates('object_id').set_index('object_id')
        # Precompute true_target -> contiguous class index instead of calling
        # list(...).index(...) for every example.
        label_index = {target: i for i, target in enumerate(INT_LABELS)}

        with jsonlines.open(filepath) as reader:
            for obj in reader:
                # avocado objids are of the form 'plasticc_id{_aug_hash}'; strip
                # the augmentation suffix to recover the base integer id.
                objid = int(obj['object_id'].split('_')[1]) if isinstance(obj['object_id'], str) else obj['object_id']
                metadata_obj = metadata.loc[objid]
                label = label_index[metadata_obj.true_target]
                redshift = metadata_obj.true_z
                yield obj['object_id'], {
                    "objid": obj['object_id'],
                    "times_wv": obj['times_wv'],
                    "target": obj['lightcurve'],
                    "label": label,
                    "redshift": redshift
                }