debug
README.md CHANGED
@@ -23,7 +23,7 @@ For example, to chat with the finance model:
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 model = AutoModelForCausalLM.from_pretrained("AdaptLLM/finance-chat")
-tokenizer = AutoTokenizer.from_pretrained("AdaptLLM/finance-chat")
+tokenizer = AutoTokenizer.from_pretrained("AdaptLLM/finance-chat", use_fast=False)
 
 # Put your input here:
 user_input = '''Use this fact to answer the question: Title of each class Trading Symbol(s) Name of each exchange on which registered

@@ -42,7 +42,7 @@ inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).input_
 outputs = model.generate(input_ids=inputs, max_length=4096)[0]
 
 answer_start = int(inputs.shape[-1])
-pred = tokenizer.decode(outputs[answer_start:], skip_special_tokens=True
+pred = tokenizer.decode(outputs[answer_start:], skip_special_tokens=True)
 
 print(f'### User Input:\n{user_input}\n\n### Assistant Output:\n{pred}')
 ```
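This commit makes two fixes to the README's chat example: the tokenizer is now loaded with `use_fast=False` (the slow, Python-based tokenizer), and the `tokenizer.decode(...)` call gains its missing closing parenthesis. For reference, a minimal sketch of how the corrected lines fit together end to end is below. The prompt-templating lines (README lines 30-41) are not part of this diff, so the placeholder `user_input` and the bare `prompt = user_input` assignment here are assumptions for illustration only, not the model's actual prompt format.

```python
# Sketch of the corrected README snippet; prompt construction is assumed,
# since those lines are not shown in this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("AdaptLLM/finance-chat")
# Fix 1: load the slow (Python) tokenizer explicitly.
tokenizer = AutoTokenizer.from_pretrained("AdaptLLM/finance-chat", use_fast=False)

# Placeholder question; the real README uses a longer finance passage here.
user_input = "On which exchange is the registered common stock listed?"
prompt = user_input  # assumption: the README wraps this in a chat template (lines 30-41)

inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).input_ids
outputs = model.generate(input_ids=inputs, max_length=4096)[0]

# Decode only the newly generated tokens, skipping the echoed prompt.
answer_start = int(inputs.shape[-1])
# Fix 2: the decode call now has its closing parenthesis.
pred = tokenizer.decode(outputs[answer_start:], skip_special_tokens=True)

print(f'### User Input:\n{user_input}\n\n### Assistant Output:\n{pred}')
```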