import os
import json
import logging

import datasets

logger = logging.getLogger(__name__)

_DESCRIPTION = """\
This dataset contains 3D MRI scans in NIfTI (.nii.gz) format, organized in a
BIDS-like structure, alongside JSON sidecar files with metadata. The data
represents structural brain MRI scans from multiple studies. For each scan, a
`.nii.gz` file is provided, along with a `.json` file containing
subject/session metadata, scanner information, clinical diagnoses, etc.
"""

_CITATION = """\
@article{exampleCitation2024,
  title={Example Brain MRI Dataset Citation},
  author={Your Name and Others},
  journal={Journal of Great Datasets},
  year={2024},
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/radiata-ai/brains-structure"

_LICENSE = "Multiple study-specific licenses; see JSON sidecars for details."


class BrainsStructureConfig(datasets.BuilderConfig):
    """BuilderConfig for the Brains-Structure dataset."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class BrainsStructure(datasets.GeneratorBasedBuilder):
    """
    A Hugging Face dataset loader for the "Brains Structure" dataset containing
    .nii.gz MRI scans plus JSON sidecar metadata.

    Users must pass `trust_remote_code=True` to load, e.g.:

        ds = load_dataset(
            "radiata-ai/brains-structure",
            name="all",
            split="train",
            trust_remote_code=True,
        )
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BrainsStructureConfig(
            name="all",
            version=VERSION,
            description="All structural MRI data from multiple studies in a BIDS-like arrangement.",
        ),
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the dataset metadata: features, description, homepage, citation, etc."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "nii_filepath": datasets.Value("string"),
                    "metadata": {
                        "participant_id": datasets.Value("string"),
                        "session_id": datasets.Value("string"),
                        "study": datasets.Value("string"),
                        "age": datasets.Value("int32"),
                        "sex": datasets.Value("string"),
                        "clinical_diagnosis": datasets.Value("string"),
                        "scanner_manufacturer": datasets.Value("string"),
                        "scanner_model": datasets.Value("string"),
                        "field_strength": datasets.Value("string"),
                        "image_quality_rating": datasets.Value("string"),
                        "total_intracranial_volume": datasets.Value("string"),
                        "split": datasets.Value("string"),
                        "license": datasets.Value("string"),
                        "website": datasets.Value("string"),
                        "citation": datasets.Value("string"),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """
        Nothing needs to be downloaded: the scans and sidecars live alongside
        this script in the dataset repository, so we discover the files locally
        and define train/validation/test splits. Each example is routed to a
        split by the "split" field in its JSON sidecar.
        """
        # Prefer an explicitly supplied directory (a manual download dir or
        # `data_dir=` passed at load time); otherwise fall back to the
        # repository root that `datasets` hands the builder.
        data_dir = dl_manager.manual_dir or self.config.data_dir or self.base_path
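        # One SplitGenerator per split value; `split_key` is matched against
        # each sidecar's "split" field in _generate_examples below.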
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "split_key": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "split_key": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "split_key": "test"},
            ),
        ]

    def _generate_examples(self, data_dir, split_key):
        """
        Walk `data_dir` recursively, pair each `*_scandata.json` sidecar with
        its `.nii.gz` scan, and yield `(id_, example)` tuples for every sidecar
        whose "split" field matches `split_key`.
        """
        id_ = 0
        for root, dirs, files in os.walk(data_dir):
            for fname in files:
                if fname.endswith("_scandata.json"):
                    sidecar_path = os.path.join(root, fname)
                    with open(sidecar_path, "r") as f:
                        metadata = json.load(f)

                    if metadata.get("split", None) == split_key:
                        # Pair the sidecar with its scan. Following BIDS-style
                        # naming, sub-XYZ_ses-XYZ_scandata.json sits next to
                        # sub-XYZ_ses-XYZ_T1w.nii.gz, so look in the same
                        # folder for a .nii.gz sharing the prefix.
                        possible_nii_prefix = fname.replace("_scandata.json", "_T1w")
                        nii_filepath = None
                        for possible_nii in files:
                            if possible_nii.startswith(possible_nii_prefix) and possible_nii.endswith(".nii.gz"):
                                nii_filepath = os.path.join(root, possible_nii)
                                break

                        # Skip sidecars without a matching scan.
                        if not nii_filepath:
                            logger.warning(f"No corresponding .nii.gz file found for {sidecar_path}")
                            continue

                        yield id_, {
                            "id": str(id_),
                            "nii_filepath": nii_filepath,
                            "metadata": {
                                "participant_id": str(metadata.get("participant_id", "")),
                                "session_id": str(metadata.get("session_id", "")),
                                "study": str(metadata.get("study", "")),
                                "age": metadata.get("age", None),
                                "sex": str(metadata.get("sex", "")),
                                "clinical_diagnosis": str(metadata.get("clinical_diagnosis", "")),
                                "scanner_manufacturer": str(metadata.get("scanner_manufacturer", "")),
                                "scanner_model": str(metadata.get("scanner_model", "")),
                                "field_strength": str(metadata.get("field_strength", "")),
                                "image_quality_rating": str(metadata.get("image_quality_rating", "")),
                                "total_intracranial_volume": str(metadata.get("total_intracranial_volume", "")),
                                "split": str(metadata.get("split", "")),
                                "license": str(metadata.get("license", "")),
                                "website": str(metadata.get("website", "")),
                                "citation": str(metadata.get("citation", "")),
                            },
                        }
                        id_ += 1
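
# Minimal usage sketch, not executed when `datasets` imports this script.
# It assumes the dataset is reachable on the Hub under the repo id above and
# that `nibabel` is installed for reading NIfTI files; both are assumptions of
# this example, not requirements of the loader itself.
if __name__ == "__main__":
    import nibabel as nib
    from datasets import load_dataset

    ds = load_dataset(
        "radiata-ai/brains-structure",
        name="all",
        split="train",
        trust_remote_code=True,
    )

    example = ds[0]
    print(example["metadata"])  # sidecar fields: study, age, sex, diagnosis, ...

    # nibabel loads the .nii.gz lazily; .shape reads the voxel grid dimensions
    # from the header without materializing the image data.
    img = nib.load(example["nii_filepath"])
    print(img.shape)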