{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<mask>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "7": {
      "content": "<nl>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "8": {
      "content": "<hs>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "9": {
      "content": "<sep>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "10": {
      "content": "<cls>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "42000": {
      "content": "[N]",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1024,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}