---
dataset_info:
  features:
  - name: x
    dtype: float64
  - name: 'y'
    dtype: float64
  - name: language
    dtype: string
  - name: corpus
    dtype: string
  splits:
  - name: train
    num_bytes: 247037602
    num_examples: 5785741
  download_size: 112131877
  dataset_size: 247037602
license: apache-2.0
---
What follows is research code. It is by no means optimized for speed, efficiency, or readability.
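The dataset published here contains only the final 2-D coordinates plus per-document metadata (see the schema above), so it can be loaded directly with `datasets`. The repo id below is a placeholder for this dataset's actual path on the Hub:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute this dataset's actual Hub path.
points = load_dataset("<this-dataset>", split="train")
print(points.features)  # x: float64, y: float64, language: string, corpus: string
```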
## Data loading, tokenizing and sharding
```python
import os
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from tqdm.notebook import tqdm
from openTSNE import TSNE
import datashader as ds
import colorcet as cc
from dask.distributed import Client
import dask.dataframe as dd
import dask_ml.feature_extraction.text
import dask.bag as db
from transformers import AutoTokenizer
from datasets import load_dataset
from datasets.utils.py_utils import convert_file_size_to_int
# The tokenizer checkpoint is not specified here, mirroring the elided dataset path below.
tokenizer = AutoTokenizer.from_pretrained(...)

def batch_tokenize(batch):
    # The "text" column name is hard-coded.
    return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]}
# The original viz used a subset of the ROOTS Corpus.
# More info on the entire dataset here: https://huggingface.co/bigscience-data
# And here: https://arxiv.org/abs/2303.03915
dset = load_dataset(..., split="train")
dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)
dset_name = "roots_subset"
max_shard_size = convert_file_size_to_int('300MB')
dataset_nbytes = dset.data.nbytes
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
print(f"Sharding into {num_shards} files.")
os.makedirs(f"{dset_name}/tokenized", exist_ok=True)
for shard_index in tqdm(range(num_shards)):
    shard = dset.shard(num_shards=num_shards, index=shard_index, contiguous=True)
    shard.to_parquet(f"{dset_name}/tokenized/tokenized-{shard_index:03d}.parquet")
```
## Embedding
```python
client = Client()  # To keep track of dask computation
client  # Displaying the client in a notebook shows a link to its dashboard
df = dd.read_parquet(f'{dset_name}/tokenized/')
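# `vocab` is not defined in the original snippet; the assumption here is that it
# is the vocabulary of the tokenizer used in the tokenization step above.
vocab = sorted(tokenizer.get_vocab())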
vect = dask_ml.feature_extraction.text.CountVectorizer(tokenizer=str.split,
                                                       token_pattern=None,
                                                       vocabulary=vocab)
tokenized_bag = df['tokenized'].to_bag()
X = vect.transform(tokenized_bag)
counts = X.compute()
client.shutdown()
tfidf_transformer = TfidfTransformer(sublinear_tf=True, norm="l2")
tfidf = tfidf_transformer.fit_transform(counts)
svd = TruncatedSVD(n_components=160)
X_svd = svd.fit_transform(tfidf)
tsne = TSNE(
    perplexity=30,  # not sure what param setting resulted in the plot
    n_jobs=28,
    random_state=42,
    verbose=True,
)
tsne_embedding = tsne.fit(X_svd)  # fit on the SVD-reduced matrix rather than the raw counts
```
## Plotting
```python
df = pd.DataFrame(data=tsne_embedding, columns=['x','y'])
agg = ds.Canvas(plot_height=600, plot_width=600).points(df, 'x', 'y')
img = ds.tf.shade(agg, cmap=cc.fire, how='eq_hist')
ds.tf.set_background(img, "black")
```
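The shaded image renders inline in a notebook; to write it to disk (presumably how the figure below was produced), datashader's `export_image` helper can be used. A minimal sketch, assuming the filename `datashader`:

```python
from datashader.utils import export_image

img = ds.tf.set_background(img, "black")
export_image(img, "datashader", background="black")  # writes ./datashader.png
```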
![ROOTS Dataset Scatterplot](./datashader.png)
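For completeness: the `x`/`y`/`language`/`corpus` columns declared in the header pair each t-SNE coordinate with its document's metadata. The export step is not shown above; a minimal sketch, assuming the source `dset` carries `language` and `corpus` fields aligned with the embedding rows:

```python
out = pd.DataFrame({
    "x": tsne_embedding[:, 0],
    "y": tsne_embedding[:, 1],
    "language": dset["language"],  # assumed metadata columns in the source dataset
    "corpus": dset["corpus"],
})
out.to_parquet("roots_tsne_points.parquet")
```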