Commit 5824687
Parent: 30b275f
Initial commit
- merges.txt +0 -0
- scratch.py +15 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
merges.txt
ADDED
The diff for this file is too large to render.
scratch.py
ADDED
@@ -0,0 +1,15 @@
+#%%
+
+import json
+fname = "tokenizer.json"
+
+with open(fname, 'r') as f:
+    vocab = json.load(f)
+
+# %%
+
+# dump this to vocab_2.json
+fname2 = "vocab_2.json"
+with open(fname2, 'w') as f:
+    json.dump(vocab, f)
+# %%
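As an aside (not part of the commit), a minimal sketch of checking the round trip that scratch.py performs, assuming tokenizer.json and the freshly written vocab_2.json sit in the working directory:

import json

# Parse both files; json.dump re-serializes with default formatting,
# so the files may differ in whitespace while the data stays identical.
with open("tokenizer.json", "r") as f:
    original = json.load(f)
with open("vocab_2.json", "r") as f:
    roundtrip = json.load(f)

assert original == roundtrip  # parsed structures compare equal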
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
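special_tokens_map.json assigns <|endoftext|> to the bos, eos, unk, and pad roles, as is standard for GPT-2 tokenizers. A minimal sketch (hypothetical, not part of the commit) of reading the map directly:

import json

with open("special_tokens_map.json") as f:
    special = json.load(f)

# bos/eos/unk are AddedToken-style dicts; pad_token is a bare string here.
for role, tok in special.items():
    content = tok["content"] if isinstance(tok, dict) else tok
    print(role, "->", content)  # every role resolves to <|endoftext|>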
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "padding": "max_length", "model_max_length": 1024, "special_tokens_map_file": null, "tokenizer_file": "/home/jupyter/.cache/huggingface/transformers/16a2f78023c8dc511294f0c97b5e10fde3ef9889ad6d11ffaa2a00714e73926e.cf2d0ecb83b6df91b3dbb53f1d1e4c311578bfd3aa0e04934215a49bf9898df0", "name_or_path": "gpt2_sum_1/checkpoint-1977", "tokenizer_class": "GPT2Tokenizer"}
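tokenizer_config.json declares a GPT2Tokenizer (model_max_length 1024, add_prefix_space false) exported from gpt2_sum_1/checkpoint-1977. A minimal sketch (not part of the commit) of loading the committed files with transformers, which reads vocab.json, merges.txt, and both JSON configs from the directory:

from transformers import GPT2Tokenizer

# "." assumes the current directory holds the committed tokenizer files.
tok = GPT2Tokenizer.from_pretrained(".")

ids = tok("hello world")["input_ids"]
print(ids, tok.decode(ids))
print(tok.eos_token)  # "<|endoftext|>", per special_tokens_map.json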
vocab.json
ADDED
The diff for this file is too large to render.