niryuu committed on
Commit 0dd045e · verified · 1 Parent(s): b5e5c2c

Update README.md

Files changed (1):
  1. README.md +75 -0
README.md CHANGED
@@ -37,6 +37,81 @@ And then fine-tuned using LoRA with dataset:
 - h: kanhatakeyama/ramdom-to-fixed-multiturn-Calm3
 - a: Aratako/Magpie-Tanuki-8B-97k

+ ## Use for Evaluation
+
+ ```python
+ # -*- coding: utf-8 -*-
+
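+ # Install dependencies; the "!"-prefixed commands assume a Jupyter/Colab notebook.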
+ !pip install -U bitsandbytes
+ !pip install -U transformers
+ !pip install -U accelerate
+ !pip install -U datasets
+ !pip install -U peft
+
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     BitsAndBytesConfig,
+ )
+ from peft import PeftModel
+ import torch
+ from tqdm import tqdm
+ import json
+
+ # Paste the access token obtained from Hugging Face here.
+ HF_TOKEN = "dummy"
+
+ model_id = "niryuu/llm-jp-3-13b-ha"
+
+ # QLoRA config
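+ # NF4 4-bit quantization with bfloat16 compute sharply reduces the memory needed to load the 13B model.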
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ # Load model
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     quantization_config=bnb_config,
+     device_map="auto",
+     token=HF_TOKEN,
+ )
+
+ # Load tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)
+
+ # Load dataset
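+ # The tasks file is JSONL, but an object may span multiple lines, so lines are accumulated until a closing "}".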
+ datasets = []
+ with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
+     item = ""
+     for line in f:
+         line = line.strip()
+         item += line
+         if item.endswith("}"):
+             datasets.append(json.loads(item))
+             item = ""
+
+ results = []
+ for data in tqdm(datasets):
+     input = data["input"]
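+     # Build the chat-formatted prompt with the generation prompt appended, placed on the model's device.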
+     token_ids = tokenizer.apply_chat_template([{"role": "user", "content": input}], tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
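+     # Greedy decoding (do_sample=False) for reproducible outputs; repetition_penalty discourages loops.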
+     outputs = model.generate(token_ids, max_new_tokens=2048, do_sample=False, repetition_penalty=1.2)
+     output = tokenizer.decode(outputs[0][token_ids.size(1):], skip_special_tokens=True)
+
+     results.append({"task_id": data["task_id"], "input": input, "output": output})
+
+ # save outputs
+ import re
+ jsonl_id = re.sub(".*/", "", model_id)
+ with open(f"./{jsonl_id}-outputs.jsonl", 'w', encoding='utf-8') as f:
+     for result in results:
+         json.dump(result, f, ensure_ascii=False)  # ensure_ascii=False for handling non-ASCII characters
+         f.write('\n')
+ ```
+
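+ The script writes one JSON object per task to `./llm-jp-3-13b-ha-outputs.jsonl` (the basename of `model_id`).
+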
 ## Use with mlx

 ```bash