Update README.md
Browse files
README.md
CHANGED
@@ -23,7 +23,7 @@ model = LlamaForCausalLM.from_pretrained("decapoda-research/llama-7b-hf",
|
|
23 |
# Load the LoRA model
|
24 |
model = PeftModel.from_pretrained(model, peft_model_id)
|
25 |
```
|
26 |
-
##
|
27 |
|
28 |
```python
|
29 |
def generate_prompt(instruction, input=None):
|
@@ -48,7 +48,7 @@ Output:"""
|
|
48 |
```
|
49 |
|
50 |
## Evaluation
|
51 |
-
|
52 |
|
53 |
```python
|
54 |
generation_config = GenerationConfig(
|
|
|
23 |
# Load the LoRA model
|
24 |
model = PeftModel.from_pretrained(model, peft_model_id)
|
25 |
```
|
26 |
+
## Prompt Template
|
27 |
|
28 |
```python
|
29 |
def generate_prompt(instruction, input=None):
|
|
|
48 |
```
|
49 |
|
50 |
## Evaluation
|
51 |
+
Feel free to change the parameters inside `GenerationConfig` to get better results.
|
52 |
|
53 |
```python
|
54 |
generation_config = GenerationConfig(
|