Transformers
PyTorch
code
custom_code
Inference Endpoints
codesage committed on
Commit
fe3c33f
·
verified ·
1 Parent(s): 37adc26

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -30,7 +30,8 @@ from transformers import AutoModel, AutoTokenizer
30
  checkpoint = "codesage/codesage-base"
31
  device = "cuda" # for GPU usage or "cpu" for CPU usage
32
 
33
- # CodeSage requires adding eos token at the end of each tokenized sequence to ensure good performance
 
34
  tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, add_eos_token=True)
35
 
36
  model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
 
30
  checkpoint = "codesage/codesage-base"
31
  device = "cuda" # for GPU usage or "cpu" for CPU usage
32
 
33
+ # Note: CodeSage requires adding eos token at the end of
34
+ # each tokenized sequence to ensure good performance
35
  tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, add_eos_token=True)
36
 
37
  model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)