metadata
dataset_info:
  features:
    - name: text
      dtype: string
  splits:
    - name: train
      num_bytes: 437069
      num_examples: 1
  download_size: 249442
  dataset_size: 437069
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
Code to create the dataset:
from datasets import Dataset, DatasetDict
from pathlib import Path

# Cache location for the calibration corpus. NOTE: this is a *file* path
# (calibration_v5.txt), not a directory, so name it accordingly.
save_path = Path.home() / ".cache/mlx-lm/calibration_v5.txt"
if not save_path.exists():
    from urllib import request

    # Create the parent cache directory before downloading into it.
    save_path.parent.mkdir(parents=True, exist_ok=True)
    url = "https://gist.githubusercontent.com/tristandruyen/9e207a95c7d75ddf37525d353e00659c/raw/571fda718462de863e5a0171078c175420c7649a/calibration_data_v5_rc.txt"
    request.urlretrieve(url, save_path)

# Read the entire file as one string; explicit UTF-8 so the downloaded text
# decodes the same way regardless of the platform's default locale encoding.
with open(save_path, encoding="utf-8") as fid:
    texts = fid.read()


def gen():
    # Single record holding the full calibration corpus (num_examples: 1).
    yield {"text": texts}


ds = Dataset.from_generator(gen)
ds = DatasetDict({"train": ds})
ds.push_to_hub("mlx-community/mlx_lm_calibration_v5")