Update README.md
README.md CHANGED

@@ -77,8 +77,17 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("ayeshaNoor1/Llama_finetunedModel")
 model = AutoModelForCausalLM.from_pretrained("ayeshaNoor1/Llama_finetunedModel")
 
-
+# Sample input text
+input_text = "I'm feeling really down lately. Can you help me?"
+
+# Tokenize and generate response
+inputs = tokenizer(input_text, return_tensors="pt")
 outputs = model.generate(**inputs)
+
+# Decode and print the response
+response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+print(response)
+
 ```
 
 ## Training Details
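For reference, this is roughly how the full usage snippet in the README reads once the hunk is applied. It is a sketch assembled from the diff above plus the import line shown in the hunk header; no generation arguments beyond the library defaults are assumed.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned tokenizer and model from the Hub
tokenizer = AutoTokenizer.from_pretrained("ayeshaNoor1/Llama_finetunedModel")
model = AutoModelForCausalLM.from_pretrained("ayeshaNoor1/Llama_finetunedModel")

# Sample input text
input_text = "I'm feeling really down lately. Can you help me?"

# Tokenize and generate response
inputs = tokenizer(input_text, return_tensors="pt")
outputs = model.generate(**inputs)  # default generation settings; options such as max_new_tokens could be passed here

# Decode and print the response
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```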