umisetokikaze commited on
Commit
da0e5d7
·
verified ·
1 Parent(s): e65bef9

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -7
README.md CHANGED
@@ -63,17 +63,23 @@ We would like to take this opportunity to thank
63
 
64
  ```python
65
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
66
 
67
- model = AutoModelForCausalLM.from_pretrainedLocal-Novel-LLM-project/Ninja-v1-NSFW-128k", trust_remote_code=True)
68
- tokenizer = AutoTokenizer.from_pretrained("Local-Novel-LLM-project/Ninja-v1-NSFW-128k")
69
 
70
- prompt = "Once upon a time,"
71
- input_ids = tokenizer.encode(prompt, return_tensors="pt")
72
 
73
- output = model.generate(input_ids, max_length=100, do_sample=True)
74
- generated_text = tokenizer.decode(output)
75
 
76
- print(generated_text)
 
 
 
 
 
 
77
  ```
78
 
79
  ## Merge recipe
 
63
 
64
  ```python
65
  from transformers import AutoModelForCausalLM, AutoTokenizer
66
+ import torch
67
 
68
+ model_id = "Local-Novel-LLM-project/Ninja-v1-NSFW-128k"
69
+ new_tokens = 1024
70
 
71
+ model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto")
72
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
73
 
74
+ system_prompt = "あなたはプロの小説家です。\n小説を書いてください\n-------- "
 
75
 
76
+ prompt = input("Enter a prompt: ")
77
+ system_prompt += prompt + "\n-------- "
78
+ model_inputs = tokenizer([system_prompt], return_tensors="pt").to("cuda")
79
+
80
+
81
+ generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
82
+ print(tokenizer.batch_decode(generated_ids)[0])
83
  ```
84
 
85
  ## Merge recipe