import os

import zarr
import numpy as np

import datasets


logger = datasets.logging.get_logger(__name__)
|
|
_CITATION = """\
@misc{buckman2024,
  author = {Buckman, Jacob},
  publisher = {Manifest AI},
  title = {LongCrawl64: {A} {Long-Context} {Natural-Language} {Dataset}},
  date = {2024-08-14},
  langid = {en}
}
"""
|
|
_DESCRIPTION = """\
LongCrawl64 is a dataset for research on architectures and algorithms for long-context modeling.
It consists of 6,661,465 pre-tokenized documents, each of which is 65,536 tokens long, for a total
token count of 435 billion. The dataset is preprocessed by truncating each document to exactly
64 KiT, shuffling along the document dimension, and rolling each document by a random offset along
the sequence dimension.
"""
|
|
class LongCrawl64Config(datasets.BuilderConfig):
    """BuilderConfig for LongCrawl64."""

    def __init__(self, context_size=65536, **kwargs):
        """BuilderConfig for LongCrawl64.

        Args:
            context_size: The size of the context window to use (default is the full 64 KiT).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.context_size = context_size
|
|
class LongCrawl64(datasets.GeneratorBasedBuilder):
    """LongCrawl64 dataset."""

    BUILDER_CONFIGS = [
        LongCrawl64Config(
            name="default",
            description="Default configuration with full 64KiT context",
        ),
        LongCrawl64Config(
            name="16k",
            description="16K context window configuration",
            context_size=16384,
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"
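    # With the "16k" configuration, _generate_examples splits each 65,536-token
    # document into 65_536 // 16_384 = 4 non-overlapping windows, so it yields
    # four examples per document; the default configuration yields one example
    # per document.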
|
|
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(
                        datasets.Value("int32"), length=self.config.context_size
                    ),
                    "input_ids": datasets.Sequence(
                        datasets.Value("int32"), length=self.config.context_size
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )
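    # Each yielded example therefore contains two fixed-length int32 sequences of
    # `context_size` tokens: "tokens" holds the raw window, and "input_ids" holds
    # the same window shifted right by one position (see _generate_examples below),
    # so input_ids[t] is the token preceding tokens[t].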
|
|
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_files = {"train": "data/train.zarr", "validation": "data/heldout.zarr"}
        downloaded_files = dl_manager.download_and_extract(data_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"zarr_path": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"zarr_path": downloaded_files["validation"]},
            ),
        ]
|
    def _generate_examples(self, zarr_path):
        """Yields examples.

        Reads the token array from the zarr store one document at a time and
        yields one example per context-size window of each document.
        """
        logger.info(f"Loading zarr array from {zarr_path}")
        z = zarr.open(zarr_path, mode="r")

        # 2-D token array: axis 0 indexes documents, axis 1 indexes token positions.
        data = z["0.0"]

        # Number of non-overlapping context windows that fit in one document.
        seqs_per_doc = data.shape[1] // self.config.context_size

        for doc_idx in range(data.shape[0]):
            doc_data = data[doc_idx]

            for seq_idx in range(seqs_per_doc):
                start = seq_idx * self.config.context_size
                end = start + self.config.context_size
                sequence = doc_data[start:end]

                # Inputs are the targets shifted right by one position, with
                # token id 50256 (GPT-2's end-of-text id) placed in the first slot.
                input_ids = np.roll(sequence, 1)
                input_ids[0] = 50256

                yield f"{doc_idx}-{seq_idx}", {
                    "tokens": sequence.tolist(),
                    "input_ids": input_ids.tolist(),
                }
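

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the loading script):
    # build the smaller "16k" configuration through this script. This assumes the
    # data/train.zarr and data/heldout.zarr stores referenced above are reachable
    # from wherever the script is run, and that the installed `datasets` version
    # still supports script-based loaders.
    ds = datasets.load_dataset(__file__, name="16k", split="validation")
    example = ds[0]
    print(len(example["tokens"]), len(example["input_ids"]))  # 16384 16384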
|
|