Update README.md
README.md CHANGED
@@ -22,8 +22,8 @@ language:
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat")
-model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat", torch_dtype=torch.bfloat16, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
+model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat-plus", torch_dtype=torch.bfloat16, device_map="auto")
 
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
@@ -41,7 +41,7 @@ pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_t
 from vllm import LLM, SamplingParams
 
 sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
-llm = LLM(model="lightblue/karasu-7B-chat")
+llm = LLM(model="lightblue/karasu-7B-chat-plus")
 
 messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
 messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})
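
For reference, a minimal runnable sketch of the updated Transformers usage touched by the first hunk. The `pipeline` import and the `apply_chat_template` prompt-building step are assumptions not shown in this diff, and the final generation call is a hedged reconstruction of the truncated context line in the second hunk header.

```python
# Minimal runnable sketch of the updated Transformers usage.
# Assumptions not in this diff: the `pipeline` import and the chat-template step.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
model = AutoModelForCausalLM.from_pretrained(
    "lightblue/karasu-7B-chat-plus", torch_dtype=torch.bfloat16, device_map="auto"
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Messages from the README (Japanese):
# system: "You are an AI assistant." / user: "Who is the Prime Minister of the United Kingdom?"
messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

# Render the chat messages into a single prompt string (assumed step).
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Greedy decoding, following the README's generation call (return_full_text assumed).
print(pipe(prompt, max_new_tokens=100, do_sample=False, return_full_text=False))
```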
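
Similarly, a hedged sketch of the vLLM path from the second hunk; only the model name and `SamplingParams` lines appear in the diff, so the tokenizer-based prompt construction and the `generate` call below are assumptions.

```python
# Minimal runnable sketch of the updated vLLM usage.
# Only the LLM/SamplingParams lines appear in the diff; prompt building and
# generation below are assumptions.
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
llm = LLM(model="lightblue/karasu-7B-chat-plus")

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

# Render the chat messages with the model's chat template (assumed step).
tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

outputs = llm.generate([prompt], sampling_params)
print(outputs[0].outputs[0].text)
```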