LG-AI-EXAONE committed on
Commit
b9e0d96
·
1 Parent(s): 07ca1b9

Update LM-Studio guide

Browse files
Files changed (2) hide show
  1. README.md +1 -0
  2. tokenizer_config.json +1 -1
README.md CHANGED
@@ -231,6 +231,7 @@ EXAONE Deep models can be inferred in the various frameworks, such as:
231
  - `SGLang`
232
  - `llama.cpp`
233
  - `Ollama`
 
234
 
235
  Please refer to our [EXAONE Deep GitHub](https://github.com/LG-AI-EXAONE/EXAONE-Deep) for more details about the inference frameworks.
236
 
 
231
  - `SGLang`
232
  - `llama.cpp`
233
  - `Ollama`
234
+ - `LM-Studio`
235
 
236
  Please refer to our [EXAONE Deep GitHub](https://github.com/LG-AI-EXAONE/EXAONE-Deep) for more details about the inference frameworks.
237
 
tokenizer_config.json CHANGED
@@ -3211,7 +3211,7 @@
3211
  "PI:USER"
3212
  ],
3213
  "bos_token": "[BOS]",
3214
- "chat_template": "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}{{ '[|system|][|endofturn|]\n' }}{% endif %}{% set content = message['content'] %}{% if '</thought>' in content %}{% set content = content.split('</thought>')[-1].lstrip('\\n') %}{% endif %}{{ '[|' + message['role'] + '|]' + content }}{% if not message['role'] == 'user' %}{{ '[|endofturn|]' }}{% endif %}{% if not loop.last %}{{ '\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n[|assistant|]<thought>\n' }}{% endif %}",
3215
  "clean_up_tokenization_spaces": true,
3216
  "eos_token": "[|endofturn|]",
3217
  "model_max_length": 1000000000000000019884624838656,
 
3211
  "PI:USER"
3212
  ],
3213
  "bos_token": "[BOS]",
3214
+ "chat_template": "{% for message in messages %}{% if loop.first and message['role'] != 'system' %}{{ '[|system|][|endofturn|]\\n' }}{% endif %}{% set content = message['content'] %}{% if '</thought>' in content %}{% set content = content.split('</thought>')[-1].lstrip('\\n') %}{% endif %}{{ '[|' + message['role'] + '|]' + content }}{% if not message['role'] == 'user' %}{{ '[|endofturn|]' }}{% endif %}{% if not loop.last %}{{ '\\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\\n[|assistant|]<thought>\\n' }}{% endif %}",
3215
  "clean_up_tokenization_spaces": true,
3216
  "eos_token": "[|endofturn|]",
3217
  "model_max_length": 1000000000000000019884624838656,