Tags: Text Generation · Transformers · Safetensors · Japanese · English · mistral · conversational · text-generation-inference · Inference Endpoints
ptrdvn committed on
Commit 5a52669 · 1 Parent(s): ade8a41

Update README.md

Files changed (1): README.md (+3, -3)
README.md CHANGED
@@ -22,8 +22,8 @@ language:
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat")
-model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat", torch_dtype=torch.bfloat16, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
+model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat-plus", torch_dtype=torch.bfloat16, device_map="auto")
 
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
@@ -41,7 +41,7 @@ pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_text=False)
 from vllm import LLM, SamplingParams
 
 sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
-llm = LLM(model="lightblue/karasu-7B-chat")
+llm = LLM(model="lightblue/karasu-7B-chat-plus")
 
 messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
 messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})
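
For reference, a minimal runnable sketch of the transformers usage after this rename. Two details are assumptions, not shown in the diff: the context lines call `pipeline(...)` without importing it, so the import is added here, and the prompt construction via the model's chat template is inferred from the `pipe(prompt, ...)` line in the second hunk header.

```python
# Minimal runnable sketch of the transformers snippet after this commit.
# Assumed beyond the diff: the `pipeline` import and the chat-template
# prompt rendering.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
model = AutoModelForCausalLM.from_pretrained(
    "lightblue/karasu-7B-chat-plus", torch_dtype=torch.bfloat16, device_map="auto"
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Same chat messages as in the diff (system: "You are an AI assistant.",
# user: "Who is the Prime Minister of the United Kingdom?").
messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Greedy decoding, matching the `pipe(prompt, ...)` context line; with
# do_sample=False the temperature setting has no effect, so it is dropped here.
outputs = pipe(prompt, max_new_tokens=100, do_sample=False, return_full_text=False)
print(outputs[0]["generated_text"])
```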
 
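
And a corresponding sketch for the vLLM path. Only the `LLM(model=...)` line changes in this commit, so the prompt rendering and the generate/output handling below are assumptions based on the standard vLLM API.

```python
# Hedged sketch of the vLLM path after this commit; everything past the
# LLM(...) line is assumed from the standard vLLM API, not shown in the diff.
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
llm = LLM(model="lightblue/karasu-7B-chat-plus")

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

# LLM.generate takes raw prompt strings, so apply the chat template first.
tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

outputs = llm.generate([prompt], sampling_params)
print(outputs[0].outputs[0].text)
```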