---
dataset_info:
  features:
  - name: content
    dtype: string
  - name: score
    dtype: float64
  - name: source
    dtype: string
  splits:
  - name: train
    num_bytes: 404127039
    num_examples: 100060
  download_size: 233844039
  dataset_size: 404127039
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---

### Sampling Code

```py
from __future__ import annotations

from typing import List

from datasets import Dataset, load_dataset
from tqdm.auto import tqdm

TARGET_CHARS = 400_000_000  # ≈100M tokens (≈4 chars/token)
BUFFER = 10_000             # streaming shuffle buffer
SEED = 42
HF_REPO = "sumuks/Ultra-FineWeb-100M"


def sample_ultrafineweb(
    target_chars: int = TARGET_CHARS,
    buffer_size: int = BUFFER,
    seed: int = SEED,
) -> Dataset:
    """
    Stream the Ultra-FineWeb English split and return a random sample whose
    total `content` length is at least `target_chars` characters (≈100M tokens).
    """
    stream = load_dataset("openbmb/Ultra-FineWeb", split="en", streaming=True)
    stream = stream.shuffle(seed=seed, buffer_size=buffer_size)

    picked: List[dict] = []
    char_count = 0
    for row in tqdm(stream, desc="sampling"):
        text = row["content"]
        char_count += len(text)
        picked.append(row)
        if char_count >= target_chars:
            break

    return Dataset.from_list(picked)


def main() -> None:
    ds = sample_ultrafineweb()
    total_chars = sum(len(r["content"]) for r in ds)
    print(f"Sampled {len(ds):,} documents, {total_chars:,} characters")
    ds.push_to_hub(HF_REPO, private=False)


if __name__ == "__main__":
    main()
```
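
### Loading the Sample

A minimal sketch of how the published sample can be loaded back from the Hub, assuming the repository name used above (`sumuks/Ultra-FineWeb-100M`) and the single `train` split described in the card header. The streaming variant is optional and simply avoids downloading the full ~234 MB before iterating.

```py
from datasets import load_dataset

# Load the full sample; the card header defines one `train` split with
# `content`, `score`, and `source` columns (100,060 rows).
ds = load_dataset("sumuks/Ultra-FineWeb-100M", split="train")
print(ds)
print(ds[0]["content"][:200])  # peek at the first document

# Streaming variant: iterate without materializing the whole dataset locally.
stream = load_dataset("sumuks/Ultra-FineWeb-100M", split="train", streaming=True)
for row in stream:
    print(row["source"], row["score"])
    break
```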