Update README.md
README.md
CHANGED
@@ -53,7 +53,6 @@ alpaca_prompt = """
|
|
53 |
{}"""
|
54 |
|
55 |
|
56 |
-
EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN
|
57 |
def formatting_prompts_func(examples):
|
58 |
instructions = examples["instruction"]
|
59 |
inputs = examples["input"]
|
@@ -74,6 +73,8 @@ model, tokenizer = FastLanguageModel.from_pretrained(
|
|
74 |
)
|
75 |
FastLanguageModel.for_inference(model)
|
76 |
|
|
|
|
|
77 |
inputs = tokenizer(
|
78 |
[
|
79 |
alpaca_prompt.format(
|
@@ -87,9 +88,6 @@ outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
|
|
87 |
tokenizer.batch_decode(outputs)
|
88 |
```
|
89 |
|
90 |
-
[More Information Needed]
|
91 |
-
|
92 |
-
|
93 |
## Model Card Contact
|
94 |
|
95 |
|
|
53 |
{}"""
|
54 |
|
55 |
|
|
|
56 |
def formatting_prompts_func(examples):
|
57 |
instructions = examples["instruction"]
|
58 |
inputs = examples["input"]
|
|
|
73 |
)
|
74 |
FastLanguageModel.for_inference(model)
|
75 |
|
76 |
+
EOS_TOKEN = tokenizer.eos_token
|
77 |
+
|
78 |
inputs = tokenizer(
|
79 |
[
|
80 |
alpaca_prompt.format(
|
|
|
88 |
tokenizer.batch_decode(outputs)
|
89 |
```
|
90 |
|
|
|
|
|
|
|
91 |
## Model Card Contact
|
92 |
|
93 |
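For context, the snippet this commit edits follows the usual Unsloth inference example: the change drops the `EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN` line from the data-formatting section, re-adds the assignment after `FastLanguageModel.for_inference(model)`, and removes a leftover `[More Information Needed]` placeholder. A minimal sketch of how the inference section reads after the change is below; the model id, the prompt template wording, and the example instruction/input are placeholders assumed for illustration, not values taken from this README.

```python
from unsloth import FastLanguageModel

# Placeholder checkpoint name -- the actual model id is not visible in this diff.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "your-username/your-finetuned-model",
    max_seq_length = 2048,
    load_in_4bit = True,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's optimized inference mode

EOS_TOKEN = tokenizer.eos_token  # the assignment this commit moves here

# Standard Alpaca-style template assumed; the README defines its own alpaca_prompt above.
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""

inputs = tokenizer(
    [
        alpaca_prompt.format(
            "Continue the Fibonacci sequence.",  # placeholder instruction
            "1, 1, 2, 3, 5, 8",                  # placeholder input
            "",                                  # response left empty for generation
        )
    ],
    return_tensors = "pt",
).to("cuda")

outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
print(tokenizer.batch_decode(outputs))
```

Running this after the change prints the decoded generation for the single prompt in the batch; `EOS_TOKEN` is available here in case the prompt or post-processing needs the tokenizer's end-of-sequence string.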