Commit 307c789
Parent(s): 2d1f29a
Update README.md
README.md CHANGED
@@ -30,7 +30,7 @@ prompt.
 >>> from transformers import AutoTokenizer, AutoModelForCausalLM
 >>> model_name = "Sharathhebbar24/chat_gpt2"
 >>> model = AutoModelForCausalLM.from_pretrained(model_name)
->>> tokenizer = AutoTokenizer.from_pretrained(
+>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
 >>> def generate_text(prompt):
 >>> inputs = tokenizer.encode(prompt, return_tensors='pt')
 >>> outputs = mod1.generate(inputs, max_length=64, pad_token_id=tokenizer.eos_token_id)
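For reference, a runnable version of the snippet this commit patches might look like the sketch below. It assumes the README's `mod1` is meant to be `model`, and it adds the decode/return step and a sample call that the diff excerpt cuts off; those parts are not in the commit itself.

# Minimal sketch of the patched README example.
# Assumption: "mod1" in the README is a typo for "model"; the decode and
# example call below are added for completeness and are not part of this commit.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Sharathhebbar24/chat_gpt2"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)  # the line this commit completes

def generate_text(prompt):
    # Encode the prompt, generate up to 64 tokens, and decode back to text.
    inputs = tokenizer.encode(prompt, return_tensors='pt')
    outputs = model.generate(inputs, max_length=64, pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(generate_text("Hello, how are you?"))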