Update README.md
Browse files
README.md
CHANGED
@@ -3,7 +3,7 @@ dataset_info:
 3    features:
 4    - name: x
 5      dtype: float64
 6  - - name: y
 7      dtype: float64
 8    - name: language
 9      dtype: string
@@ -15,6 +15,7 @@ dataset_info:
15    num_examples: 5785741
16    download_size: 112131877
17    dataset_size: 247037602
18  ---
19
20  ![ROOTS Dataset Scatterplot](./datashader.png)
@@ -46,6 +47,10 @@ from datasets.utils.py_utils import convert_file_size_to_int
46  def batch_tokenize(batch):
47      return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]} # "text" column hard encoded
48
49  dset = load_dataset(..., split="train")
50
51  dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)
 3    features:
 4    - name: x
 5      dtype: float64
 6  + - name: 'y'
 7      dtype: float64
 8    - name: language
 9      dtype: string
15    num_examples: 5785741
16    download_size: 112131877
17    dataset_size: 247037602
18  + license: apache-2.0
19  ---
20
21  ![ROOTS Dataset Scatterplot](./datashader.png)
47  def batch_tokenize(batch):
48      return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]} # "text" column hard encoded
49
50  + # The original viz used a subset of the ROOTS Corpus.
51  + # More info on the entire dataset here: https://huggingface.co/bigscience-data
52  + # And here: https://arxiv.org/abs/2303.03915
53  +
54  dset = load_dataset(..., split="train")
55
56  dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)