r"""
Builds a WebDataset from the Cityscapes Video dataset.

Adapted from the `WebDataset documentation <https://github.com/webdataset/webdataset/>`_.
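
Example invocation (the script filename and paths below are illustrative
placeholders, not names defined by this project)::

    python build_csvps_wds.py manifest.csv /data/cityscapes-video ./shards -s 10 -c gz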
"""

import argparse
import collections
import itertools
import json
import multiprocessing as mp
import tarfile
import typing as T
from io import BytesIO
from pathlib import Path
from pprint import pformat

import pandas as pd
from tqdm import tqdm


def parse_args():
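    r"""
    Parse and validate the command-line arguments.
    """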
    ap = argparse.ArgumentParser(
        description="Build a WebDataset from the Cityscapes Video dataset."
    )

    ap.add_argument(
        "--shard-size",
        "-s",
        type=int,
        default=10,
        help="Number of sequences per shard.",
    )
    ap.add_argument(
        "--name",
        "-n",
        type=str,
        default="csvps",
        help=(
            "Name of the dataset. This will be used as the prefix for the tar files."
        ),
    )
    ap.add_argument(
        "--variant",
        type=str,
        default="",
        help=(
            "When passing different manifest variants, this will be used to postfix "
            "each split such that the resulting dataset name is unique."
        ),
    )
    ap.add_argument(
        "--force", "-f", action="store_true", help="Overwrite existing data."
    )
    ap.add_argument(
        "--splits", nargs="+", default=["train", "val", "test"], help="Splits to build."
    )
    ap.add_argument(
        "--compression",
        "-c",
        default="",
        help="Compression to use for the tar shards (e.g. 'gz'); leave empty for none.",
    )

    ap.add_argument("manifest", type=Path, help="Path to the manifest CSV file.")
    ap.add_argument("data", type=Path, help="Path to the Cityscapes Video dataset.")
    ap.add_argument("output", type=Path, help="Path to the output directory.")

    rt = ap.parse_args()

    if rt.shard_size < 1:
        ap.error("Shard size must be a positive integer.")
    if rt.name == "":
        ap.error("Name must be a non-empty string.")
    if not rt.name.isalnum() or not rt.name.islower():
        ap.error("Name must be a lowercase alphanumeric string.")
    if rt.variant != "" and (not rt.variant.isalnum() or not rt.variant.islower()):
        ap.error("Variant must be a lowercase alphanumeric string.")
    if not rt.manifest.exists():
        ap.error(f"Manifest file not found: {rt.manifest}")
    if not rt.data.exists():
        ap.error(f"Data directory not found: {rt.data}")
    if not rt.output.exists():
        rt.output.mkdir(parents=True)
        print(f"Created output directory: {rt.output}")

    return rt


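# Zero-padding width used by pad_number() for sequence, frame, and shard numbers.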
PAD_TO: T.Final[int] = 6


def pad_number(n: int) -> str:
    r"""
    For sorting, numbers are padded with zeros to a fixed width.
    """
    if not isinstance(n, int):
        msg = f"Expected an integer, got {n} of type {type(n)}"
        raise TypeError(msg)
    return f"{n:0{PAD_TO}d}"


def read_timestamp(path: Path) -> int:
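    r"""
    Read a single integer timestamp from a ``<frame>.timestamp.txt`` file.
    """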
    with path.open("r") as f:
        ts = f.read().strip()
    if not ts.isdigit():
        msg = f"Expected a timestamp, got {ts} from {path}"
        raise ValueError(msg)
    return int(ts)


def write_bytes(tar: tarfile.TarFile, bt: bytes, arc: str):
    r"""
    Simple utility to write bytes (e.g. a metadata JSON) directly from memory to
    the tarfile, since these do not exist as a file on disk.
    """
    with BytesIO() as buf:
        buf.write(bt)

        # The tar entry needs an explicit size; after writing, the buffer position
        # equals the payload length.
        tar_info = tarfile.TarInfo(arc)
        tar_info.size = buf.tell()

        # Rewind so that addfile() reads the payload from the start of the buffer.
        buf.seek(0)

        tar.addfile(tar_info, buf)


def find_sequence_files(
    seq: int,
    group: pd.DataFrame,
    *,
    data_dir: Path,
    dataset_name: str,
    compression: str,
    missing_ok: bool = False,
    frame_inputs: T.Sequence[str] = ("image.png", "vehicle.json"),
    frame_annotations: T.Sequence[str] = ("panoptic.png", "depth.tiff"),
    sequence_data: T.Sequence[str] = ("camera.json",),
    separator: str = "/",
) -> T.Iterator[tuple[Path | bytes, str]]:
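    r"""
    Yield ``(source, archive name)`` pairs for a single sequence, where ``source``
    is either a path on disk or in-memory bytes (e.g. the per-frame metadata JSON).
    Archive names are prefixed with ``dataset_name`` and join the remaining path
    components with dots, so all files of one sequence share a single sample key.
    """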
    seq_pad = pad_number(seq)
    seq_dir = data_dir / seq_pad

    group = group.sort_values("frame")

    primary_keys = group.index.tolist()
    frame_numbers = list(map(pad_number, group["frame"].tolist()))

    for i, meta in enumerate(
        group.drop(columns=["sequence", "frame", "split"]).to_dict(
            orient="records", index=True
        )
    ):
        frame_06 = frame_numbers[i]
        is_ann = meta["is_annotated"]

        meta["primary_key"] = primary_keys[i]

        for var in frame_inputs + frame_annotations:
            path_file = seq_dir / f"{frame_06}.{var}"
            if not path_file.exists():
                if missing_ok or (var in frame_annotations and not is_ann):
                    continue
                msg = f"File not found: {path_file}"
                raise FileNotFoundError(msg)

            yield (
                path_file,
                separator.join(
                    (
                        dataset_name,
                        path_file.relative_to(data_dir).as_posix().replace("/", "."),
                    )
                ),
            )

        path_ts = seq_dir / f"{frame_06}.timestamp.txt"
        if not path_ts.exists():
            if not missing_ok:
                msg = f"Timestamp file not found: {path_ts}"
                raise FileNotFoundError(msg)
            meta["timestamp"] = None
        else:
            meta["timestamp"] = read_timestamp(path_ts)

        yield (
            json.dumps(meta).encode("utf-8"),
            separator.join((dataset_name, f"{seq_pad}.{frame_06}.metadata.json")),
        )

    for var in sequence_data:
        path_file = seq_dir.with_suffix("." + var)
        if not path_file.exists():
            if missing_ok:
                continue
            msg = f"File not found: {path_file}"
            raise FileNotFoundError(msg)

        yield (
            path_file,
            separator.join(
                (
                    dataset_name,
                    path_file.relative_to(data_dir).as_posix(),
                )
            ),
        )

    yield (
        json.dumps(frame_numbers).encode("utf-8"),
        separator.join((dataset_name, f"{seq_pad}.frames.json")),
    )


def run_collector(
    seq: int, group: pd.DataFrame, kwargs: dict
) -> tuple[int, list[tuple[Path | bytes, str]]]:
    r"""
    Worker that collects the files for a single sequence.
    """
    return (seq, list(find_sequence_files(seq, group, **kwargs)))


def run_writer(
    tar_path: Path, items: list[list[tuple[Path | bytes, str]]], compression: str = "gz"
) -> None:
    r"""
    Worker that writes the files to a tar archive.
    """
    if compression != "":
        tar_path = tar_path.with_suffix(f".tar.{compression}")
        write_mode = f"w:{compression}"
    else:
        tar_path = tar_path.with_suffix(".tar")
        write_mode = "w"

    with tarfile.open(tar_path, write_mode) as tar:
        for item in itertools.chain.from_iterable(items):
            try:
                path, arc = item
            except ValueError:
                msg = f"Expected a tuple of length 2, got {item}"
                raise ValueError(msg)

            if isinstance(path, Path):
                tar.add(path, arcname=arc)
            else:
                write_bytes(tar, path, arc)


def build_shard(
    mfst: pd.DataFrame,
    *,
    tar_dir: Path,
    shard_size: int,
    **kwargs,
):
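    r"""
    Collect the files of every sequence in ``mfst`` in parallel and write them to
    shards of ``shard_size`` sequences each under ``tar_dir``. Remaining keyword
    arguments are forwarded to ``find_sequence_files``.
    """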
    tar_dir.mkdir(exist_ok=True, parents=True)

    write_log = collections.defaultdict(list)

    n_groups = len(mfst["sequence"].unique())
    # Ceiling division: a trailing, partially-filled shard is still written.
    n_shards = (n_groups + shard_size - 1) // shard_size

    # Collected file lists per sequence; assumes sequence numbers are contiguous
    # and start at zero.
    targets = [None] * n_groups

    compression = kwargs.get("compression", "")

    n_proc = min(mp.cpu_count(), 16)
    with mp.Pool(n_proc) as pool:
        write_jobs: list[mp.pool.AsyncResult] = []

        with tqdm(total=n_groups, desc="Collecting data") as pbar_group:
            for seq, files in pool.starmap(
                run_collector,
                [(seq, group, kwargs) for seq, group in mfst.groupby("sequence")],
                chunksize=min(8, shard_size),
            ):
                assert targets[seq] is None, f"Duplicate sequence: {seq}"

                pbar_group.update()

                targets[seq] = files

                shard_index = seq // shard_size
                shard_offset = shard_index * shard_size
                shard_specs = targets[shard_offset : shard_offset + shard_size]

                shard_06 = pad_number(shard_index)

                write_log[shard_06].append(pad_number(seq))

                # Once every sequence of this shard has been collected, write the
                # tar archive asynchronously on the same pool.
                if all(s is not None for s in shard_specs):
                    tar_path = tar_dir / shard_06

                    write_jobs.append(
                        pool.apply_async(
                            run_writer,
                            (tar_path, shard_specs, compression),
                        )
                    )

        with tqdm(total=n_shards, desc="Writing shards") as pbar_shard:
            for j in write_jobs:
                j.get()
                pbar_shard.update()

        pool.close()
        pool.join()

    print("Created shard files:\n" + pformat(dict(write_log)))


def main():
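    r"""
    Build the WebDataset shards for every split requested on the command line.
    """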
    args = parse_args()
    manifest = pd.read_csv(args.manifest, index_col="primary_key")

    for split in args.splits:
        split_out = "-".join([s for s in (split, args.variant) if len(s) > 0])
        tar_dir = args.output / split_out

        if tar_dir.exists():
            if args.force:
                print(f"Removing existing dataset: {tar_dir}")
                for f in tar_dir.glob("*.tar*"):
                    f.unlink()
            else:
                msg = f"Dataset already exists: {tar_dir}"
                raise FileExistsError(msg)

        print(f"Generating {split_out} split...")

        build_shard(
            manifest[manifest["split"] == split],
            tar_dir=tar_dir,
            data_dir=args.data / split,
            shard_size=args.shard_size,
            dataset_name=f"{args.name}-{split_out}",
            missing_ok=True,
            compression=args.compression,
        )


if __name__ == "__main__":
    main()