SmolVLM_Essay_Knowledge_Distillation/smolvlm-instruct-trl-sft-ChartQA/checkpoint-152/tokenizer_config.json
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<repo_name>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<reponame>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<file_sep>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "<filename>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "7": {
      "content": "<gh_stars>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "8": {
      "content": "<issue_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "9": {
      "content": "<issue_comment>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "10": {
      "content": "<issue_closed>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "11": {
      "content": "<jupyter_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "12": {
      "content": "<jupyter_text>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "13": {
      "content": "<jupyter_code>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "14": {
      "content": "<jupyter_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "15": {
      "content": "<jupyter_script>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "16": {
      "content": "<empty_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49152": {
      "content": "<fake_token_around_image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49153": {
      "content": "<image>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49154": {
      "content": "<end_of_utterance>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<fake_token_around_image>",
    "<image>",
    "<end_of_utterance>"
  ],
  "bos_token": "<|im_start|>",
  "chat_template": "<|im_start|>{% for message in messages %}{{message['role'] | capitalize}}{% if message['content'][0]['type'] == 'image' %}{{':'}}{% else %}{{': '}}{% endif %}{% for line in message['content'] %}{% if line['type'] == 'text' %}{{line['text']}}{% elif line['type'] == 'image' %}{{ '<image>' }}{% endif %}{% endfor %}<end_of_utterance>\n{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<end_of_utterance>",
  "extra_special_tokens": {},
  "legacy": false,
  "model_max_length": 16384,
  "pad_token": "<|im_end|>",
  "processor_class": "Idefics3Processor",
  "tokenizer_class": "GPT2Tokenizer",
  "truncation_side": "left",
  "unk_token": "<|endoftext|>",
  "vocab_size": 49152
}
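
For reference, a minimal sketch (not part of the checkpoint itself) of how a checkpoint directory containing this tokenizer_config.json is typically loaded and how its chat_template is rendered. The local path and the example question are assumptions taken from the directory listing above, not values defined in this file.

# Minimal sketch: load the fine-tuned SmolVLM checkpoint's processor and
# render a prompt with the chat_template defined in tokenizer_config.json.
# The path below is assumed from the checkpoint directory shown above.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "smolvlm-instruct-trl-sft-ChartQA/checkpoint-152"
)

# One user turn with an image plus a text question, mirroring the
# role/content structure the template iterates over.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Which month has the highest revenue?"},
        ],
    }
]

# add_generation_prompt=True appends the trailing "Assistant:" cue from the
# template so the model continues with its answer.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)
# Expected shape, per the template:
# <|im_start|>User:<image>Which month has the highest revenue?<end_of_utterance>
# Assistant:

Note that because the first content item is an image, the template emits "User:" with no trailing space before "<image>", while text-only turns get "User: "; <end_of_utterance> (id 49154) also serves as the eos_token, and <|im_end|> is reused as the pad_token.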