|
import os |
|
import json |
|
import datasets |
|
import logging |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
# Human-readable summary surfaced through DatasetInfo.description / the dataset card.
_DESCRIPTION = """

This dataset contains 3D MRI scans in NIfTI (.nii.gz) format, organized in a BIDS-like structure,

alongside JSON sidecar files with metadata. The data represents structural brain MRI scans from

multiple studies. For each scan, a `.nii.gz` file is provided, along with a `.json` file containing

subject/session metadata, scanner information, clinical diagnoses, etc.

"""



# BibTeX entry surfaced through DatasetInfo.citation (placeholder citation).
_CITATION = """

@article{exampleCitation2024,

  title={Example Brain MRI Dataset Citation},

  author={Your Name and Others},

  journal={Journal of Great Datasets},

  year={2024},

}

"""



# Canonical dataset page on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/radiata-ai/brains-structure"



# Licensing varies per source study; the per-scan license string lives in each JSON sidecar.
_LICENSE = "Multiple study-specific licenses; see JSON sidecars for details."
|
|
|
class BrainsStructureConfig(datasets.BuilderConfig):
    """BuilderConfig for Brains-Structure; adds nothing beyond the base config."""

    def __init__(self, **kwargs):
        # All options (name, version, description, ...) are handled by the base class.
        super().__init__(**kwargs)
|
|
|
|
|
class BrainsStructure(datasets.GeneratorBasedBuilder):
    """
    A Hugging Face dataset loader for the "Brains Structure" dataset
    containing .nii.gz MRI scans plus JSON sidecar metadata.

    Users must pass `trust_remote_code=True` to load, e.g.:
        ds = load_dataset("radiata-ai/brains-structure", name="all", split="train", trust_remote_code=True)
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        BrainsStructureConfig(
            name="all",
            version=VERSION,
            description="All structural MRI data from multiple studies in a BIDS-like arrangement.",
        ),
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """
        Return the dataset metadata: features, description, homepage, citation, etc.

        Note: "age" is declared int32; `_generate_examples` coerces the raw
        sidecar value to int (or None) so Arrow encoding cannot fail on a
        float/string age. All other metadata fields are stringified.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "nii_filepath": datasets.Value("string"),
                    "metadata": {
                        "participant_id": datasets.Value("string"),
                        "session_id": datasets.Value("string"),
                        "study": datasets.Value("string"),
                        "age": datasets.Value("int32"),
                        "sex": datasets.Value("string"),
                        "clinical_diagnosis": datasets.Value("string"),
                        "scanner_manufacturer": datasets.Value("string"),
                        "scanner_model": datasets.Value("string"),
                        "field_strength": datasets.Value("string"),
                        "image_quality_rating": datasets.Value("string"),
                        "total_intracranial_volume": datasets.Value("string"),
                        "split": datasets.Value("string"),
                        "license": datasets.Value("string"),
                        "website": datasets.Value("string"),
                        "citation": datasets.Value("string"),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """
        Discover the data locally — the files are assumed to already live in
        the dataset repository, so nothing is downloaded here.

        All three split generators walk the same directory; the per-example
        "split" field in each JSON sidecar does the actual train/val/test
        routing inside `_generate_examples`.
        """
        # NOTE(review): `dataset_dir` is not a documented DownloadManager
        # attribute in every `datasets` release — confirm against the pinned
        # `datasets` version this script targets.
        data_dir = dl_manager.manual_dir if dl_manager.manual_dir else dl_manager.dataset_dir

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir, "split_key": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_dir": data_dir, "split_key": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_dir": data_dir, "split_key": "test"}),
        ]

    @staticmethod
    def _coerce_age(value):
        """Best-effort cast of the sidecar "age" value to int (schema int32); None if missing or unparseable."""
        if value is None:
            return None
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    def _generate_examples(self, data_dir, split_key):
        """
        Load the .nii.gz + .json sidecar files from `data_dir` and yield
        (id_, example) tuples.

        The "split" field in each JSON sidecar indicates train/validation/test;
        only sidecars whose "split" matches `split_key` are yielded. Sidecars
        with no matching .nii.gz file are skipped with a warning (best-effort
        rather than failing the whole build).
        """
        id_ = 0
        for root, dirs, files in os.walk(data_dir):
            # Sort traversal so example ids are deterministic across
            # filesystems and runs (os.walk order is otherwise arbitrary).
            dirs.sort()
            for fname in sorted(files):
                if not fname.endswith("_scandata.json"):
                    continue

                sidecar_path = os.path.join(root, fname)
                # Sidecars are JSON text; pin the encoding instead of relying
                # on the platform default.
                with open(sidecar_path, "r", encoding="utf-8") as f:
                    metadata = json.load(f)

                if metadata.get("split", None) != split_key:
                    continue

                # The scan shares the sidecar's stem:
                # "<stem>_scandata.json" -> "<stem>_T1w*.nii.gz"
                possible_nii_prefix = fname.replace("_scandata.json", "_T1w")
                nii_filepath = None
                for possible_nii in sorted(files):
                    if possible_nii.startswith(possible_nii_prefix) and possible_nii.endswith(".nii.gz"):
                        nii_filepath = os.path.join(root, possible_nii)
                        break

                if not nii_filepath:
                    # Lazy %-formatting: the message is only built if the
                    # warning is actually emitted.
                    logger.warning("No corresponding .nii.gz file found for %s", sidecar_path)
                    continue

                yield id_, {
                    "id": str(id_),
                    "nii_filepath": nii_filepath,
                    "metadata": {
                        "participant_id": str(metadata.get("participant_id", "")),
                        "session_id": str(metadata.get("session_id", "")),
                        "study": str(metadata.get("study", "")),
                        # int32 in the schema; coerce so a float/string age in
                        # a sidecar cannot break Arrow encoding.
                        "age": self._coerce_age(metadata.get("age")),
                        "sex": str(metadata.get("sex", "")),
                        "clinical_diagnosis": str(metadata.get("clinical_diagnosis", "")),
                        "scanner_manufacturer": str(metadata.get("scanner_manufacturer", "")),
                        "scanner_model": str(metadata.get("scanner_model", "")),
                        "field_strength": str(metadata.get("field_strength", "")),
                        "image_quality_rating": str(metadata.get("image_quality_rating", "")),
                        "total_intracranial_volume": str(metadata.get("total_intracranial_volume", "")),
                        "split": str(metadata.get("split", "")),
                        "license": str(metadata.get("license", "")),
                        "website": str(metadata.get("website", "")),
                        "citation": str(metadata.get("citation", "")),
                    },
                }
                id_ += 1
|
|