Update README.md
README.md
CHANGED
@@ -83,3 +83,24 @@ parameters:
dtype: float16

```

# Sample

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the tokenizer and model from the same repository so the vocabularies match.
tokenizer = AutoTokenizer.from_pretrained("Sakalti/SJT-2B-V1.1")
model = AutoModelForCausalLM.from_pretrained(
    "Sakalti/SJT-2B-V1.1",
    device_map="auto",
    torch_dtype=torch.float16,
)

input_text = "おはようございます!"  # "Good morning!"
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

# temperature only takes effect when sampling is enabled.
outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
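
If the tokenizer defines a chat template (an assumption here, not confirmed by this model card), formatting the prompt with `apply_chat_template` generally gives better instruction-following behavior than passing raw text. A minimal sketch, reusing the same repository and generation settings as above:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

tokenizer = AutoTokenizer.from_pretrained("Sakalti/SJT-2B-V1.1")
model = AutoModelForCausalLM.from_pretrained(
    "Sakalti/SJT-2B-V1.1",
    device_map="auto",
    torch_dtype=torch.float16,
)

# Build the prompt with the tokenizer's chat template (assumes one is defined).
messages = [{"role": "user", "content": "おはようございます!"}]  # "Good morning!"
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
# Slice off the prompt tokens so only the newly generated reply is printed.
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```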