rithwiks committed on
Commit
cd8db21
·
1 Parent(s): 3666c10

utils made, baselines uploaded

Browse files
GBI-16-2D-Legacy.py CHANGED
@@ -127,63 +127,4 @@ class GBI_16_2D_Legacy(datasets.GeneratorBasedBuilder):
127
  # this data is natively formatted like (1, 4200, 2154)
128
  # just use the 2D image
129
  image_data = hdul[0].data[0,:,:].tolist()
130
- yield task_instance_key, {**{"image": image_data}, **item}
131
-
132
- def make_split_jsonl_files(config_type="tiny", data_dir="./data",
133
- telescope_subdirectories=["INT", "JKT","LCO", "TJO", "WHT"],
134
- outdir="./splits", seed=42):
135
- """
136
- Create jsonl files for the GBI-16-2D-Legacy dataset.
137
-
138
- config_type: str, default="tiny"
139
- The type of split to create. Options are "tiny" and "full".
140
- data_dir: str, default="./data"
141
- The directory where the FITS files are located.
142
- telescope_subdirectories: list, default=["INT", "JKT","LCO", "TJO", "WHT"]
143
- The subdirectories of the data_dir that contain the FITS files for each telescope.
144
- outdir: str, default="./splits"
145
- The directory where the jsonl files will be created.
146
- seed: int, default=42
147
- The seed for the random split.
148
- """
149
- random.seed(seed)
150
- os.makedirs(outdir, exist_ok=True)
151
-
152
- fits_files = []
153
- for subdir in telescope_subdirectories:
154
- fits_files.extend(glob(os.path.join(data_dir, subdir, "*.fits")))
155
-
156
- random.shuffle(fits_files)
157
-
158
- if config_type == "tiny":
159
- train_files = []
160
- test_files = []
161
- for subdir in telescope_subdirectories:
162
- subdir_files = [f for f in fits_files if subdir in f]
163
- train_files.extend(subdir_files[:2])
164
- test_files.extend(subdir_files[2:3])
165
- elif config_type == "full":
166
- train_files = []
167
- test_files = []
168
- for subdir in telescope_subdirectories:
169
- subdir_files = [f for f in fits_files if subdir in f]
170
- split_idx = int(0.8 * len(subdir_files))
171
- train_files.extend(subdir_files[:split_idx])
172
- test_files.extend(subdir_files[split_idx:])
173
- else:
174
- raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
175
-
176
- def create_jsonl(files, split_name):
177
- output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
178
- with open(output_file, "w") as out_f:
179
- for file in files:
180
- print(file, flush=True, end="...")
181
- with fits.open(file, memmap=False) as hdul:
182
- image_id = os.path.basename(file).split(".fits")[0]
183
-
184
- telescope = hdul[0].header.get('TELESCOP', 'UNKNOWN')
185
- item = {"image_id": image_id, "image": file, "telescope": telescope}
186
- out_f.write(json.dumps(item) + "\n")
187
-
188
- create_jsonl(train_files, "train")
189
- create_jsonl(test_files, "test")
 
127
  # this data is natively formatted like (1, 4200, 2154)
128
  # just use the 2D image
129
  image_data = hdul[0].data[0,:,:].tolist()
130
+ yield task_instance_key, {**{"image": image_data}, **item}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
baseline_results_2d.csv ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ,JPEG_XL_MAX_EFFORT_BPD,JPEG_XL_MAX_EFFORT_WRITE_RUNTIME,JPEG_XL_MAX_EFFORT_READ_RUNTIME,JPEG_XL_BPD,JPEG_XL_WRITE_RUNTIME,JPEG_XL_READ_RUNTIME,JPEG_2K_BPD,JPEG_2K_WRITE_RUNTIME,JPEG_2K_READ_RUNTIME,JPEG_LS_BPD,JPEG_LS_WRITE_RUNTIME,JPEG_LS_READ_RUNTIME,RICE_BPD,RICE_WRITE_RUNTIME,RICE_READ_RUNTIME
2
+ ./data/LCO/ogg2m001.fits,,,,,,,,,,,,,,,
3
+ ./data/LCO/coj0m403.fits,,,,,,,,,,,,,,,
4
+ ./data/LCO/lsc0m409.fits,,,,,,,,,,,,,,,
5
+ ./data/LCO/cpt0m407.fits,,,,,,,,,,,,,,,
6
+ ./data/LCO/ogg0m404.fits,,,,,,,,,,,,,,,
7
+ ./data/LCO/coj2m002.fits,,,,,,,,,,,,,,,
8
+ ./data/LCO/coj0m405.fits,,,,,,,,,,,,,,,
9
+ ./data/LCO/lsc0m412.fits,,,,,,,,,,,,,,,
10
+ ./data/LCO/elp0m411.fits,,,,,,,,,,,,,,,
11
+ ./data/LCO/ogg2m001.fits_hdu0,0.7228008563701923,43.20007514953613,0.5066413879394531,0.7646298922025241,0.4445362091064453,0.2911219596862793,0.7561227651742789,0.7407228946685791,0.6194639205932617,0.7517606295072116,0.14260530471801758,0.22156763076782227,0.8046292818509615,0.07075810432434082,0.044094085693359375
12
+ ./data/LCO/coj0m403.fits_hdu0,0.6845067483669605,67.48599791526794,0.7721624374389648,0.7279621187963822,0.6539788246154785,0.4639432430267334,0.7265686903312462,1.1587002277374268,0.9693844318389893,0.7221424525065708,0.20934057235717773,0.1679387092590332,0.763859698709029,0.08583760261535645,0.06677007675170898
13
+ ./data/LCO/lsc0m409.fits_hdu0,0.6610539990143784,46.64720416069031,0.6685361862182617,0.7128655617076376,0.6489887237548828,0.44870448112487793,0.7006576245555041,1.0780272483825684,0.9041671752929688,0.7085028880160018,0.2137744426727295,0.17609882354736328,0.7456735623937075,0.08374571800231934,0.06576395034790039
14
+ ./data/LCO/cpt0m407.fits_hdu0,0.6880637730944651,47.97620415687561,0.7337470054626465,0.7368511324984539,0.6511118412017822,0.4521498680114746,0.7296890702303649,1.1380155086517334,0.9582910537719727,0.7261818701202072,0.20433974266052246,0.15947604179382324,0.7707059030998763,0.08529806137084961,0.06650400161743164
15
+ ./data/LCO/ogg0m404.fits_hdu0,0.7123647488114564,52.49390196800232,0.5467383861541748,0.7629273744298856,0.6397292613983154,0.4389636516571045,0.7612617223736086,1.2026004791259766,1.0056798458099365,0.7506706695462276,0.19949769973754883,0.1565086841583252,0.797979143717146,0.08756709098815918,0.06797599792480469
16
+ ./data/LCO/coj2m002.fits_hdu0,0.48990149864783655,31.05488395690918,0.4487757682800293,0.5265641432542068,0.3766806125640869,0.2681262493133545,0.5202474740835337,0.564016580581665,0.465839147567749,0.5321988619290865,0.11781430244445801,0.0965888500213623,0.5791156475360577,0.054184913635253906,0.04157567024230957
17
+ ./data/LCO/coj0m405.fits_hdu0,0.7249306682900433,59.124993562698364,0.736142635345459,0.772575383136209,0.6583375930786133,0.4584355354309082,0.7651275087449753,1.193568468093872,1.008777141571045,0.758441196080705,0.20306921005249023,0.16224908828735352,0.8085132756938003,0.09140729904174805,0.07058954238891602
18
+ ./data/LCO/lsc0m412.fits_hdu0,0.695435702690167,55.14425826072693,0.7698564529418945,0.7426116131822047,0.6599817276000977,0.44414687156677246,0.7424180520929963,1.2062599658966064,1.0164411067962646,0.7327258051658163,0.20291376113891602,0.1620502471923828,0.7762316041473407,0.08937621116638184,0.06872892379760742
utils/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .create_splits import *
utils/create_splits.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ from glob import glob
4
+ import json
5
+ from huggingface_hub import hf_hub_download
6
+
7
+
8
+ from astropy.io import fits
9
+ import datasets
10
+ from datasets import DownloadManager
11
+ from fsspec.core import url_to_fs
12
+ from tqdm import tqdm
13
+
14
+
15
def make_split_jsonl_files(config_type="tiny", data_dir="./data",
                           telescope_subdirectories=None,
                           outdir="./splits", seed=42):
    """
    Create train/test jsonl split files for the GBI-16-2D-Legacy dataset.

    config_type: str, default="tiny"
        The type of split to create. Options are "tiny" and "full".
    data_dir: str, default="./data"
        The directory where the FITS files are located.
    telescope_subdirectories: list or None, default=None
        The subdirectories of data_dir that contain the FITS files for each
        telescope. None selects ["INT", "JKT", "LCO", "TJO", "WHT"].
    outdir: str, default="./splits"
        The directory where the jsonl files will be created.
    seed: int, default=42
        The seed for the random split.

    Raises ValueError if config_type is not "tiny" or "full".
    """
    # Bug fix: the previous default was a mutable list literal shared across
    # calls; resolve the default per call instead.
    if telescope_subdirectories is None:
        telescope_subdirectories = ["INT", "JKT", "LCO", "TJO", "WHT"]

    random.seed(seed)
    os.makedirs(outdir, exist_ok=True)

    fits_files = []
    for subdir in telescope_subdirectories:
        fits_files.extend(glob(os.path.join(data_dir, subdir, "*.fits")))

    # Shuffle once so the per-telescope slices below are random samples.
    random.shuffle(fits_files)

    train_files = []
    test_files = []
    if config_type == "tiny":
        for subdir in telescope_subdirectories:
            # NOTE(review): substring match on the full path — assumes a
            # telescope name never appears elsewhere in a path; verify if the
            # directory layout changes.
            subdir_files = [f for f in fits_files if subdir in f]
            # tiny split: 2 train + 1 test file per telescope
            train_files.extend(subdir_files[:2])
            test_files.extend(subdir_files[2:3])
    elif config_type == "full":
        for subdir in telescope_subdirectories:
            subdir_files = [f for f in fits_files if subdir in f]
            # full split: 80/20 train/test per telescope
            split_idx = int(0.8 * len(subdir_files))
            train_files.extend(subdir_files[:split_idx])
            test_files.extend(subdir_files[split_idx:])
    else:
        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")

    def create_jsonl(files, split_name):
        # Write one json record per FITS file for the given split.
        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
        with open(output_file, "w") as out_f:
            for file in files:
                print(file, flush=True, end="...")
                with fits.open(file, memmap=False) as hdul:
                    image_id = os.path.basename(file).split(".fits")[0]
                    telescope = hdul[0].header.get('TELESCOP', 'UNKNOWN')
                item = {"image_id": image_id, "image": file, "telescope": telescope}
                out_f.write(json.dumps(item) + "\n")

    create_jsonl(train_files, "train")
    create_jsonl(test_files, "test")
73
+
74
+
75
if __name__ == "__main__":
    # Generate both split configurations when run as a script.
    for split_config in ("tiny", "full"):
        make_split_jsonl_files(split_config)
utils/eval_baselines.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Runs several baseline compression algorithms and stores results for each FITS file in a csv.
3
+ This code is written functionality-only and cleaning it up is a TODO.
4
+
5
+ Only runs on LCO data.
6
+ """
7
+
8
+
9
+ import os
10
+ import re
11
+ from pathlib import Path
12
+ import argparse
13
+ import os.path
14
+ from astropy.io import fits
15
+ import numpy as np
16
+ from time import time
17
+ import pandas as pd
18
+ from tqdm import tqdm
19
+ import glob
20
+
21
+ from astropy.io.fits import CompImageHDU
22
+ from imagecodecs import (
23
+ jpeg2k_encode,
24
+ jpeg2k_decode,
25
+ jpegls_encode,
26
+ jpegls_decode,
27
+ jpegxl_encode,
28
+ jpegxl_decode,
29
+ rcomp_encode,
30
+ rcomp_decode,
31
+ )
32
+
33
+ # Functions that require some preset parameters. All others default to lossless.
34
+
35
# PEP 8 (E731): use def instead of assigning lambdas to names, so the
# presets get real names in tracebacks and can carry docstrings.
def jpegxl_encode_max_effort_preset(x):
    """Lossless JPEG-XL encode at maximum effort (effort=9: slowest, best ratio)."""
    return jpegxl_encode(x, lossless=True, effort=9)


def jpegxl_encode_preset(x):
    """Lossless JPEG-XL encode at default effort."""
    return jpegxl_encode(x, lossless=True)
37
+
38
def find_matching_files(root_dir='./data/LCO'):
    """Recursively collect every .fits file under root_dir and return the paths."""
    search_pattern = os.path.join(root_dir, '**', '*.fits')
    return glob.glob(search_pattern, recursive=True)
43
+
44
def benchmark_imagecodecs_compression_algos(arr, compression_type):
    """Round-trip arr through the named codec and report size/timing metrics.

    Returns a dict with <codec>_BPD (encoded bytes per array element),
    <codec>_WRITE_RUNTIME and <codec>_READ_RUNTIME (seconds).
    Asserts the round trip is lossless.
    """
    # NOTE(review): ALL_CODECS is defined under the __main__ guard, so this
    # function only works when the script is run directly — confirm intended.
    encode, decode = ALL_CODECS[compression_type]

    t0 = time()
    compressed = encode(arr)
    write_runtime = time() - t0

    t0 = time()
    # The RICE decoder needs the output shape/dtype supplied explicitly.
    if compression_type == "RICE":
        restored = decode(compressed, shape=arr.shape, dtype=np.uint16)
    else:
        restored = decode(compressed)
    read_runtime = time() - t0

    # Lossless check: decoded data must match the input exactly.
    assert np.array_equal(arr, restored)

    return {
        compression_type + "_BPD": len(compressed) / arr.size,
        compression_type + "_WRITE_RUNTIME": write_runtime,
        compression_type + "_READ_RUNTIME": read_runtime,
    }
68
+
69
def main(dim):
    """Benchmark every baseline codec on every FITS file and persist a CSV.

    dim: str — only '2d' is supported here; anything else raises RuntimeError.
    Results accumulate in a DataFrame indexed by "<path>_hdu<idx>" and are
    checkpointed to baseline_results_<dim>.csv as the run progresses.
    """
    save_path = f"baseline_results_{dim}.csv"

    file_paths = find_matching_files()

    # NOTE(review): `columns` is defined under the __main__ guard — this
    # function assumes script execution, not library import.
    df = pd.DataFrame(columns=columns, index=[str(p) for p in file_paths])

    print(f"Number of files to be tested: {len(file_paths)}")

    ct = 0

    for path in tqdm(file_paths):
        for hdu_idx in [0]:
            with fits.open(path) as hdul:
                if dim == '2d':
                    # data is natively (1, H, W); take the 2D plane
                    arr = hdul[hdu_idx].data[0]
                else:
                    raise RuntimeError(f"{dim} not applicable.")

                ct += 1
                if ct % 1 == 0:  # checkpoint after every file
                    print(df.mean())
                    df.to_csv(save_path)

                for algo in ALL_CODECS.keys():
                    try:
                        if algo == "JPEG_2K" and dim != '2d':
                            test_results = benchmark_imagecodecs_compression_algos(arr.transpose(1, 2, 0), algo)
                        else:
                            test_results = benchmark_imagecodecs_compression_algos(arr, algo)

                        for column, value in test_results.items():
                            if column in df.columns:
                                df.at[path + f"_hdu{hdu_idx}", column] = value

                    # Best-effort: one failing codec/file must not kill the run.
                    except Exception as e:
                        print(f"Failed at {path} under exception {e}.")

    # BUG FIX: the in-loop checkpoint runs *before* each file is benchmarked,
    # so the final file's results were never written. Save once more here.
    print(df.mean())
    df.to_csv(save_path)
109
if __name__ == "__main__":
    # Parse the single positional dimension argument (only '2d' is valid here).
    parser = argparse.ArgumentParser(description="Process some 2D or 3D data.")
    parser.add_argument(
        "dimension",
        choices=['2d'],
        help="Specify whether the data is 2d, or; not applicable here: 3dt (3d time dimension), or 3dw (3d wavelength dimension)."
    )
    args = parser.parse_args()
    dim = args.dimension.lower()

    # RICE REQUIRES UNIQUE INPUT OF ARR SHAPE AND DTYPE INTO DECODER

    # Codec registry: name -> [encoder, decoder]; all presets are lossless.
    ALL_CODECS = {
        "JPEG_XL_MAX_EFFORT": [jpegxl_encode_max_effort_preset, jpegxl_decode],
        "JPEG_XL": [jpegxl_encode_preset, jpegxl_decode],
        "JPEG_2K": [jpeg2k_encode, jpeg2k_decode],
        "JPEG_LS": [jpegls_encode, jpegls_decode],
        "RICE": [rcomp_encode, rcomp_decode],
    }

    # One BPD/WRITE/READ column triple per codec, in registry order.
    columns = []
    for algo in ALL_CODECS:
        columns.extend([
            algo + "_BPD",
            algo + "_WRITE_RUNTIME",
            algo + "_READ_RUNTIME",
        ])

    main(dim)