---
dataset_info:
  features:
    - name: content
      dtype: string
    - name: input_ids
      sequence: int32
    - name: attention_mask
      sequence: int8
  splits:
    - name: train
      num_bytes: 8094693
      num_examples: 925
  download_size: 2266949
  dataset_size: 8094693
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/clean/train-*
---
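
# Stack_Tokenized

Pre-tokenized text data. Each example carries the raw `content` string alongside its `input_ids` (int32) and `attention_mask` (int8) sequences, so the single `train` split (925 examples, ~8.1 MB) can be consumed without re-running a tokenizer.

A minimal loading sketch using the `datasets` library; the repo id `TrevorDohm/Stack_Tokenized` is an assumption inferred from the page header and may differ from the actual namespace:

```python
from datasets import load_dataset

# Repo id assumed from the page header; adjust if the namespace differs.
ds = load_dataset("TrevorDohm/Stack_Tokenized", split="train")

example = ds[0]
print(example["content"][:80])          # original text (string)
print(len(example["input_ids"]))        # token ids (sequence of int32)
print(len(example["attention_mask"]))   # mask (sequence of int8), same length as input_ids
```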