wonderwind271 committed
Commit 77c164e · verified · 1 Parent(s): 9ca3019

Upload folder using huggingface_hub
lora/sft/README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ base_model: THUDM/glm-4-9b-chat
+ library_name: peft
+ license: other
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ model-index:
+ - name: sft
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft
+
+ This model is a fine-tuned version of [THUDM/glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat) on the formatted dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.5199
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.12.0
+ - Transformers 4.43.2
+ - Pytorch 2.2.2+cu121
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
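The card above describes a LoRA adapter rather than a standalone model, so at inference time it is applied on top of the base checkpoint. Below is a minimal sketch of doing that with `transformers` and `peft`, assuming the adapter folder is available locally as `lora/sft` and a bf16-capable GPU; the paths, dtype, and prompt are illustrative assumptions, not part of the upload.

```python
# Minimal sketch: attach the LoRA adapter in lora/sft to the GLM-4-9B-Chat base model.
# Assumptions: local folder "lora/sft" (adapter_config.json + adapter_model.safetensors),
# a GPU with enough memory for the 9B base model in bf16.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "THUDM/glm-4-9b-chat"
tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)
base = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True
)
model = PeftModel.from_pretrained(base, "lora/sft")  # loads the adapter weights on top

messages = [{"role": "user", "content": "Hello!"}]  # arbitrary example prompt
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
out = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```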
lora/sft/adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "THUDM/glm-4-9b-chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "query_key_value",
+ "dense_h_to_4h",
+ "dense",
+ "dense_4h_to_h"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
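This config trains rank-8 LoRA matrices (scaled by alpha/r = 2) on the attention and MLP projections of each GLM block. For reference, a sketch of the equivalent `peft.LoraConfig` follows; the JSON above remains the authoritative definition.

```python
# Sketch of the peft.LoraConfig matching adapter_config.json above (illustrative only).
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                       # LoRA rank
    lora_alpha=16,             # effective scaling: lora_alpha / r = 2.0
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["query_key_value", "dense_h_to_4h", "dense", "dense_4h_to_h"],
)
```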
lora/sft/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:830c3647e075857538ee6e0cf9629004590472edb43fe259103d0065050b6cd6
+ size 84754200
lora/sft/added_tokens.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "<eop>": 151334,
+ "<sop>": 151333,
+ "<|assistant|>": 151337,
+ "<|begin_of_image|>": 151339,
+ "<|begin_of_video|>": 151341,
+ "<|end_of_image|>": 151340,
+ "<|end_of_video|>": 151342,
+ "<|endoftext|>": 151329,
+ "<|observation|>": 151338,
+ "<|system|>": 151335,
+ "<|user|>": 151336,
+ "[MASK]": 151330,
+ "[gMASK]": 151331,
+ "[sMASK]": 151332
+ }
lora/sft/all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "epoch": 0.9955555555555555,
+ "eval_loss": 1.5198827981948853,
+ "eval_runtime": 11.7693,
+ "eval_samples_per_second": 8.497,
+ "eval_steps_per_second": 8.497,
+ "total_flos": 1.2980476361834496e+16,
+ "train_loss": 1.7745797293526786,
+ "train_runtime": 341.4533,
+ "train_samples_per_second": 2.636,
+ "train_steps_per_second": 0.328
+ }
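Assuming `eval_loss` is the mean token-level cross-entropy (the usual `Trainer` convention), the corresponding evaluation perplexity is `exp(eval_loss)`:

```python
# Quick sanity check, assuming eval_loss is mean token cross-entropy.
import math

eval_loss = 1.5198827981948853   # from all_results.json
print(math.exp(eval_loss))       # ~4.57 perplexity on the evaluation set
```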
lora/sft/checkpoint-112/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: THUDM/glm-4-9b-chat
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.12.0
lora/sft/checkpoint-112/adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "THUDM/glm-4-9b-chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "query_key_value",
+ "dense_h_to_4h",
+ "dense",
+ "dense_4h_to_h"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
lora/sft/checkpoint-112/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:830c3647e075857538ee6e0cf9629004590472edb43fe259103d0065050b6cd6
+ size 84754200
lora/sft/checkpoint-112/added_tokens.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "<eop>": 151334,
+ "<sop>": 151333,
+ "<|assistant|>": 151337,
+ "<|begin_of_image|>": 151339,
+ "<|begin_of_video|>": 151341,
+ "<|end_of_image|>": 151340,
+ "<|end_of_video|>": 151342,
+ "<|endoftext|>": 151329,
+ "<|observation|>": 151338,
+ "<|system|>": 151335,
+ "<|user|>": 151336,
+ "[MASK]": 151330,
+ "[gMASK]": 151331,
+ "[sMASK]": 151332
+ }
lora/sft/checkpoint-112/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8846b93dcae5983ca52f2617bf8c1037f9e36dc656c50d11bdb8a17f9886378
+ size 169679354
lora/sft/checkpoint-112/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:386fcc8cc1089aade9450d86fb239ea3483f455fd2d78d8378645feecfec9d69
+ size 14244
lora/sft/checkpoint-112/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4fc31a656c2809f413c2392223b8c06a2006c5191ca89eebdb44beedb6ae4e7
+ size 1064
lora/sft/checkpoint-112/special_tokens_map.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "[MASK]",
+ "[gMASK]",
+ "[sMASK]",
+ "<sop>",
+ "<eop>",
+ "<|system|>",
+ "<|user|>",
+ "<|assistant|>",
+ "<|observation|>",
+ "<|begin_of_image|>",
+ "<|end_of_image|>",
+ "<|begin_of_video|>",
+ "<|end_of_video|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
lora/sft/checkpoint-112/tokenization_chatglm.py ADDED
@@ -0,0 +1,323 @@
1
+ import regex as re
2
+ import base64
3
+ import os
4
+ import json
5
+ import tiktoken
6
+ from torch import TensorType
7
+ from typing import List, Optional, Union, Dict, Any
8
+ from transformers import PreTrainedTokenizer
9
+ from transformers.utils import logging, PaddingStrategy
10
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
11
+
12
+
13
+ class ChatGLM4Tokenizer(PreTrainedTokenizer):
14
+ vocab_files_names = {"vocab_file": "tokenizer.model"}
15
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
16
+
17
+ def __init__(
18
+ self,
19
+ vocab_file,
20
+ padding_side="left",
21
+ clean_up_tokenization_spaces=False,
22
+ encode_special_tokens=False,
23
+ **kwargs
24
+ ):
25
+ self.name = "GLM4Tokenizer"
26
+ self.vocab_file = vocab_file
27
+ pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
28
+ self.pat_str = re.compile(pat_str)
29
+ self.encode_special_tokens = encode_special_tokens
30
+
31
+ mergeable_ranks = {}
32
+ with open(vocab_file) as f:
33
+ for line in f:
34
+ token, rank = line.strip().split()
35
+ rank = int(rank)
36
+ token = base64.b64decode(token)
37
+ mergeable_ranks[token] = rank
38
+
39
+ self.mergeable_ranks = mergeable_ranks
40
+
41
+ self.tokenizer = tiktoken.Encoding(
42
+ name="my_tokenizer",
43
+ pat_str=pat_str,
44
+ mergeable_ranks=mergeable_ranks,
45
+ special_tokens={}
46
+ )
47
+ self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
48
+ self.n_words = len(self.decoder)
49
+
50
+ super().__init__(
51
+ padding_side=padding_side,
52
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
53
+ **kwargs
54
+ )
55
+
56
+ @property
57
+ def vocab_size(self):
58
+ return self.n_words
59
+
60
+ def get_vocab(self):
61
+ """ Returns vocab as a dict """
62
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
63
+ vocab.update(self.added_tokens_encoder)
64
+ return vocab
65
+
66
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str:
67
+ """
68
+ Converts a sequence of tokens in a single string.
69
+ """
70
+ text = ""
71
+ temp = b""
72
+ for t in tokens:
73
+ if isinstance(t, int):
74
+ t = chr(t)
75
+ if isinstance(t, str):
76
+ if temp:
77
+ text += temp.decode("utf-8", errors="replace")
78
+ elif isinstance(t, bytes):
79
+ temp += t
80
+ else:
81
+ raise TypeError("token should only be of type int, bytes or str")
82
+ if temp:
83
+ text += temp.decode("utf-8", errors="replace")
84
+ return text
85
+
86
+ def _tokenize(self, text, **kwargs):
87
+ tokens = []
88
+ ids = self.tokenizer.encode(text)
89
+ for t in ids:
90
+ tokens.append(self.decoder[t])
91
+ return tokens
92
+
93
+ def _convert_token_to_id(self, token):
94
+ """ Converts a token (str) in an id using the vocab. """
95
+ return self.mergeable_ranks[token]
96
+
97
+ def _convert_id_to_token(self, index):
98
+ """Converts an index (integer) in a token (str) using the vocab."""
99
+ return self.decoder.get(index, "")
100
+
101
+ def save_vocabulary(self, save_directory, filename_prefix=None):
102
+ """
103
+ Save the vocabulary and special tokens file to a directory.
104
+
105
+ Args:
106
+ save_directory (`str`):
107
+ The directory in which to save the vocabulary.
108
+ filename_prefix (`str`, *optional*):
109
+ An optional prefix to add to the named of the saved files.
110
+
111
+ Returns:
112
+ `Tuple(str)`: Paths to the files saved.
113
+ """
114
+ if os.path.isdir(save_directory):
115
+ vocab_file = os.path.join(
116
+ save_directory, self.vocab_files_names["vocab_file"]
117
+ )
118
+ else:
119
+ vocab_file = save_directory
120
+
121
+ with open(self.vocab_file, 'rb') as fin:
122
+ proto_str = fin.read()
123
+
124
+ with open(vocab_file, "wb") as writer:
125
+ writer.write(proto_str)
126
+
127
+ return (vocab_file,)
128
+
129
+ def get_prefix_tokens(self):
130
+ prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("<sop>")]
131
+ return prefix_tokens
132
+
133
+ def build_single_message(self, role, metadata, message, tokenize=True):
134
+ assert role in ["system", "user", "assistant", "observation"], role
135
+ if tokenize:
136
+ role_tokens = [self.convert_tokens_to_ids(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n",
137
+ disallowed_special=())
138
+ message_tokens = self.tokenizer.encode(message, disallowed_special=())
139
+ tokens = role_tokens + message_tokens
140
+ return tokens
141
+ else:
142
+ return str(f"<|{role}|>{metadata}\n{message}")
143
+
144
+ # Use Jinja Template in tokenizer_config.json
145
+ # def apply_chat_template(
146
+ # self,
147
+ # conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]], "Conversation"],
148
+ # add_generation_prompt: bool = False,
149
+ # tokenize: bool = True,
150
+ # padding: bool = False,
151
+ # truncation: bool = False,
152
+ # max_length: Optional[int] = None,
153
+ # return_tensors: Optional[Union[str, TensorType]] = None,
154
+ # return_dict: bool = False,
155
+ # tokenizer_kwargs: Optional[Dict[str, Any]] = None,
156
+ # add_special_tokens: bool = True,
157
+ # **kwargs,
158
+ # ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
159
+ #
160
+ # if return_dict and not tokenize:
161
+ # raise ValueError(
162
+ # "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
163
+ # "of tokenizer outputs to return."
164
+ # )
165
+ #
166
+ # def handle_single_conversation(conversation):
167
+ # input_ids = self.get_prefix_tokens() if add_special_tokens else []
168
+ # input_message = "[gMASK]<sop>" if add_special_tokens else ""
169
+ # for item in conversation:
170
+ # if item.get("tools"):
171
+ # tools = item["tools"]
172
+ # content = "你是一个名为 GhatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。"
173
+ # content += "\n\n# 可用工具"
174
+ # for tool in tools:
175
+ # if tool["type"] == "function":
176
+ # function = tool["function"]
177
+ # content += f"\n\n## {function['name']}\n\n{json.dumps(function, ensure_ascii=False, indent=4)}"
178
+ # content += "\n在调用上述函数时,请使用 Json 格式表示调用的参数。"
179
+ # elif tool["type"] == "python":
180
+ # content += "\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。"
181
+ # elif tool["type"] == "simple_browser":
182
+ # content += "\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。"
183
+ # elif tool["type"] == "cogview":
184
+ # content += "\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。"
185
+ # else:
186
+ # raise NotImplementedError(f"Unknown tool type {tool['type']}")
187
+ # input = self.build_single_message("system", "", content, tokenize=tokenize)
188
+ # if tokenize:
189
+ # input_ids.extend(input)
190
+ # else:
191
+ # input_message += input
192
+ # if item["content"]:
193
+ # input = self.build_single_message(
194
+ # item["role"],
195
+ # item.get("metadata", ""),
196
+ # item["content"],
197
+ # tokenize=tokenize
198
+ # )
199
+ # if tokenize:
200
+ # input_ids.extend(input)
201
+ # else:
202
+ # input_message += input
203
+ # if add_generation_prompt:
204
+ # if tokenize:
205
+ # input_ids.extend([self.convert_tokens_to_ids("<|assistant|>")])
206
+ # else:
207
+ # input_message += "<|assistant|>"
208
+ # return input_ids if tokenize else input_message
209
+ #
210
+ # # Main logic to handle different conversation formats
211
+ # if isinstance(conversation, list) and all(isinstance(i, dict) for i in conversation):
212
+ # result = handle_single_conversation(conversation)
213
+ # elif isinstance(conversation, list) and all(isinstance(i, list) for i in conversation):
214
+ # result = [handle_single_conversation(c) for c in conversation]
215
+ # elif hasattr(conversation, "messages"):
216
+ # result = handle_single_conversation(conversation.messages)
217
+ # else:
218
+ # raise ValueError("Invalid conversation format")
219
+ #
220
+ # if tokenize:
221
+ # output = self.batch_encode_plus(
222
+ # [result] if isinstance(result[0], int) else result,
223
+ # padding=padding,
224
+ # truncation=truncation,
225
+ # max_length=max_length,
226
+ # return_tensors=return_tensors,
227
+ # is_split_into_words=True,
228
+ # add_special_tokens=False
229
+ # )
230
+ # if return_dict:
231
+ # return output
232
+ # else:
233
+ # return output["input_ids"]
234
+ # else:
235
+ # return result
236
+
237
+ def build_inputs_with_special_tokens(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
242
+ adding special tokens. A BERT sequence has the following format:
243
+
244
+ - single sequence: `[CLS] X [SEP]`
245
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
246
+
247
+ Args:
248
+ token_ids_0 (`List[int]`):
249
+ List of IDs to which the special tokens will be added.
250
+ token_ids_1 (`List[int]`, *optional*):
251
+ Optional second list of IDs for sequence pairs.
252
+
253
+ Returns:
254
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
255
+ """
256
+ prefix_tokens = self.get_prefix_tokens()
257
+ token_ids_0 = prefix_tokens + token_ids_0
258
+ if token_ids_1 is not None:
259
+ token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("<eos>")]
260
+ return token_ids_0
261
+
262
+ def _pad(
263
+ self,
264
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
265
+ max_length: Optional[int] = None,
266
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
267
+ pad_to_multiple_of: Optional[int] = None,
268
+ return_attention_mask: Optional[bool] = None,
269
+ ) -> dict:
270
+ """
271
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
272
+
273
+ Args:
274
+ encoded_inputs:
275
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
276
+ max_length: maximum length of the returned list and optionally padding length (see below).
277
+ Will truncate by taking into account the special tokens.
278
+ padding_strategy: PaddingStrategy to use for padding.
279
+
280
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
281
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
282
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
283
+ The tokenizer padding sides are defined in self.padding_side:
284
+
285
+ - 'left': pads on the left of the sequences
286
+ - 'right': pads on the right of the sequences
287
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
288
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
289
+ `>= 7.5` (Volta).
290
+ return_attention_mask:
291
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
292
+ """
293
+ # Load from model defaults
294
+ assert self.padding_side == "left"
295
+
296
+ required_input = encoded_inputs[self.model_input_names[0]]
297
+ seq_length = len(required_input)
298
+
299
+ if padding_strategy == PaddingStrategy.LONGEST:
300
+ max_length = len(required_input)
301
+
302
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
303
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
304
+
305
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
306
+
307
+ # Initialize attention mask if not present.
308
+ if "attention_mask" not in encoded_inputs:
309
+ encoded_inputs["attention_mask"] = [1] * seq_length
310
+
311
+ if "position_ids" not in encoded_inputs:
312
+ encoded_inputs["position_ids"] = list(range(seq_length))
313
+
314
+ if needs_to_be_padded:
315
+ difference = max_length - len(required_input)
316
+
317
+ if "attention_mask" in encoded_inputs:
318
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
319
+ if "position_ids" in encoded_inputs:
320
+ encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
321
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
322
+
323
+ return encoded_inputs
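Because `tokenizer_config.json` maps `AutoTokenizer` to this class via `auto_map`, the tokenizer is normally constructed with `trust_remote_code=True`. A small usage sketch of the helpers defined above, assuming the checkpoint folder is available locally; the prompt text is an arbitrary example.

```python
# Usage sketch for ChatGLM4Tokenizer, assuming a local copy of lora/sft/checkpoint-112
# (tokenizer.model, tokenizer_config.json, tokenization_chatglm.py, special token maps).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("lora/sft/checkpoint-112", trust_remote_code=True)

# [gMASK] and <sop>: the two ids build_inputs_with_special_tokens prepends to every input.
print(tok.get_prefix_tokens())

# One chat turn rendered the way build_single_message tokenizes it (role tag + metadata + text).
ids = tok.build_single_message("user", "", "What is a LoRA adapter?")
print(ids[:5], tok.decode(ids))

# _pad above asserts left padding, so set padding_side before padding a batch.
tok.padding_side = "left"
batch = tok(["short", "a somewhat longer example"], padding=True)
print(len(batch["input_ids"][0]), len(batch["input_ids"][1]))  # equal after padding
```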
lora/sft/checkpoint-112/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a493598071550244b2ee7f26118f3edec2150b9dfa967929a99052ac83fe716
+ size 2623634
lora/sft/checkpoint-112/tokenizer_config.json ADDED
@@ -0,0 +1,148 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "151329": {
4
+ "content": "<|endoftext|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "151330": {
12
+ "content": "[MASK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "151331": {
20
+ "content": "[gMASK]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "151332": {
28
+ "content": "[sMASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "151333": {
36
+ "content": "<sop>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "151334": {
44
+ "content": "<eop>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "151335": {
52
+ "content": "<|system|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "151336": {
60
+ "content": "<|user|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "151337": {
68
+ "content": "<|assistant|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "151338": {
76
+ "content": "<|observation|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "151339": {
84
+ "content": "<|begin_of_image|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "151340": {
92
+ "content": "<|end_of_image|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "151341": {
100
+ "content": "<|begin_of_video|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "151342": {
108
+ "content": "<|end_of_video|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ }
115
+ },
116
+ "additional_special_tokens": [
117
+ "<|endoftext|>",
118
+ "[MASK]",
119
+ "[gMASK]",
120
+ "[sMASK]",
121
+ "<sop>",
122
+ "<eop>",
123
+ "<|system|>",
124
+ "<|user|>",
125
+ "<|assistant|>",
126
+ "<|observation|>",
127
+ "<|begin_of_image|>",
128
+ "<|end_of_image|>",
129
+ "<|begin_of_video|>",
130
+ "<|end_of_video|>"
131
+ ],
132
+ "auto_map": {
133
+ "AutoTokenizer": [
134
+ "tokenization_chatglm.ChatGLM4Tokenizer",
135
+ null
136
+ ]
137
+ },
138
+ "chat_template": "{{ '[gMASK]<sop>' }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|system|>\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + content + '<|assistant|>' }}{% elif message['role'] == 'assistant' %}{{ '\n' + content }}{% endif %}{% endfor %}",
139
+ "clean_up_tokenization_spaces": false,
140
+ "do_lower_case": false,
141
+ "eos_token": "<|endoftext|>",
142
+ "model_max_length": 128000,
143
+ "pad_token": "<|endoftext|>",
144
+ "padding_side": "right",
145
+ "remove_space": false,
146
+ "split_special_tokens": false,
147
+ "tokenizer_class": "ChatGLM4Tokenizer"
148
+ }
lora/sft/checkpoint-112/trainer_state.json ADDED
@@ -0,0 +1,110 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9955555555555555,
5
+ "eval_steps": 500,
6
+ "global_step": 112,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.08888888888888889,
13
+ "grad_norm": 7.216278553009033,
14
+ "learning_rate": 8.333333333333334e-06,
15
+ "loss": 2.4521,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 0.17777777777777778,
20
+ "grad_norm": 5.572214603424072,
21
+ "learning_rate": 9.842915805643156e-06,
22
+ "loss": 2.1638,
23
+ "step": 20
24
+ },
25
+ {
26
+ "epoch": 0.26666666666666666,
27
+ "grad_norm": 2.498288631439209,
28
+ "learning_rate": 9.221639627510076e-06,
29
+ "loss": 1.9103,
30
+ "step": 30
31
+ },
32
+ {
33
+ "epoch": 0.35555555555555557,
34
+ "grad_norm": 3.3242993354797363,
35
+ "learning_rate": 8.18711994874345e-06,
36
+ "loss": 1.7952,
37
+ "step": 40
38
+ },
39
+ {
40
+ "epoch": 0.4444444444444444,
41
+ "grad_norm": 2.2792470455169678,
42
+ "learning_rate": 6.840622763423391e-06,
43
+ "loss": 1.6722,
44
+ "step": 50
45
+ },
46
+ {
47
+ "epoch": 0.5333333333333333,
48
+ "grad_norm": 2.598036289215088,
49
+ "learning_rate": 5.3139525976465675e-06,
50
+ "loss": 1.6154,
51
+ "step": 60
52
+ },
53
+ {
54
+ "epoch": 0.6222222222222222,
55
+ "grad_norm": 2.531435489654541,
56
+ "learning_rate": 3.756550564175727e-06,
57
+ "loss": 1.6036,
58
+ "step": 70
59
+ },
60
+ {
61
+ "epoch": 0.7111111111111111,
62
+ "grad_norm": 2.560697317123413,
63
+ "learning_rate": 2.320866025105016e-06,
64
+ "loss": 1.5899,
65
+ "step": 80
66
+ },
67
+ {
68
+ "epoch": 0.8,
69
+ "grad_norm": 2.3769493103027344,
70
+ "learning_rate": 1.1474337861210543e-06,
71
+ "loss": 1.6193,
72
+ "step": 90
73
+ },
74
+ {
75
+ "epoch": 0.8888888888888888,
76
+ "grad_norm": 2.1420528888702393,
77
+ "learning_rate": 3.511175705587433e-07,
78
+ "loss": 1.569,
79
+ "step": 100
80
+ },
81
+ {
82
+ "epoch": 0.9777777777777777,
83
+ "grad_norm": 1.8092883825302124,
84
+ "learning_rate": 9.866357858642206e-09,
85
+ "loss": 1.5909,
86
+ "step": 110
87
+ }
88
+ ],
89
+ "logging_steps": 10,
90
+ "max_steps": 112,
91
+ "num_input_tokens_seen": 0,
92
+ "num_train_epochs": 1,
93
+ "save_steps": 500,
94
+ "stateful_callbacks": {
95
+ "TrainerControl": {
96
+ "args": {
97
+ "should_epoch_stop": false,
98
+ "should_evaluate": false,
99
+ "should_log": false,
100
+ "should_save": true,
101
+ "should_training_stop": true
102
+ },
103
+ "attributes": {}
104
+ }
105
+ },
106
+ "total_flos": 1.2980476361834496e+16,
107
+ "train_batch_size": 1,
108
+ "trial_name": null,
109
+ "trial_params": null
110
+ }
lora/sft/checkpoint-112/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a91a4ed932e171381acdd219106313852fe2612831e5273940b143e95ee3af6f
+ size 5304
lora/sft/eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "epoch": 0.9955555555555555,
+ "eval_loss": 1.5198827981948853,
+ "eval_runtime": 11.7693,
+ "eval_samples_per_second": 8.497,
+ "eval_steps_per_second": 8.497
+ }
lora/sft/special_tokens_map.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "[MASK]",
+ "[gMASK]",
+ "[sMASK]",
+ "<sop>",
+ "<eop>",
+ "<|system|>",
+ "<|user|>",
+ "<|assistant|>",
+ "<|observation|>",
+ "<|begin_of_image|>",
+ "<|end_of_image|>",
+ "<|begin_of_video|>",
+ "<|end_of_video|>"
+ ],
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
lora/sft/tokenization_chatglm.py ADDED
@@ -0,0 +1,323 @@
1
+ import regex as re
2
+ import base64
3
+ import os
4
+ import json
5
+ import tiktoken
6
+ from torch import TensorType
7
+ from typing import List, Optional, Union, Dict, Any
8
+ from transformers import PreTrainedTokenizer
9
+ from transformers.utils import logging, PaddingStrategy
10
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
11
+
12
+
13
+ class ChatGLM4Tokenizer(PreTrainedTokenizer):
14
+ vocab_files_names = {"vocab_file": "tokenizer.model"}
15
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
16
+
17
+ def __init__(
18
+ self,
19
+ vocab_file,
20
+ padding_side="left",
21
+ clean_up_tokenization_spaces=False,
22
+ encode_special_tokens=False,
23
+ **kwargs
24
+ ):
25
+ self.name = "GLM4Tokenizer"
26
+ self.vocab_file = vocab_file
27
+ pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
28
+ self.pat_str = re.compile(pat_str)
29
+ self.encode_special_tokens = encode_special_tokens
30
+
31
+ mergeable_ranks = {}
32
+ with open(vocab_file) as f:
33
+ for line in f:
34
+ token, rank = line.strip().split()
35
+ rank = int(rank)
36
+ token = base64.b64decode(token)
37
+ mergeable_ranks[token] = rank
38
+
39
+ self.mergeable_ranks = mergeable_ranks
40
+
41
+ self.tokenizer = tiktoken.Encoding(
42
+ name="my_tokenizer",
43
+ pat_str=pat_str,
44
+ mergeable_ranks=mergeable_ranks,
45
+ special_tokens={}
46
+ )
47
+ self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
48
+ self.n_words = len(self.decoder)
49
+
50
+ super().__init__(
51
+ padding_side=padding_side,
52
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
53
+ **kwargs
54
+ )
55
+
56
+ @property
57
+ def vocab_size(self):
58
+ return self.n_words
59
+
60
+ def get_vocab(self):
61
+ """ Returns vocab as a dict """
62
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
63
+ vocab.update(self.added_tokens_encoder)
64
+ return vocab
65
+
66
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str:
67
+ """
68
+ Converts a sequence of tokens in a single string.
69
+ """
70
+ text = ""
71
+ temp = b""
72
+ for t in tokens:
73
+ if isinstance(t, int):
74
+ t = chr(t)
75
+ if isinstance(t, str):
76
+ if temp:
77
+ text += temp.decode("utf-8", errors="replace")
78
+ elif isinstance(t, bytes):
79
+ temp += t
80
+ else:
81
+ raise TypeError("token should only be of type int, bytes or str")
82
+ if temp:
83
+ text += temp.decode("utf-8", errors="replace")
84
+ return text
85
+
86
+ def _tokenize(self, text, **kwargs):
87
+ tokens = []
88
+ ids = self.tokenizer.encode(text)
89
+ for t in ids:
90
+ tokens.append(self.decoder[t])
91
+ return tokens
92
+
93
+ def _convert_token_to_id(self, token):
94
+ """ Converts a token (str) in an id using the vocab. """
95
+ return self.mergeable_ranks[token]
96
+
97
+ def _convert_id_to_token(self, index):
98
+ """Converts an index (integer) in a token (str) using the vocab."""
99
+ return self.decoder.get(index, "")
100
+
101
+ def save_vocabulary(self, save_directory, filename_prefix=None):
102
+ """
103
+ Save the vocabulary and special tokens file to a directory.
104
+
105
+ Args:
106
+ save_directory (`str`):
107
+ The directory in which to save the vocabulary.
108
+ filename_prefix (`str`, *optional*):
109
+ An optional prefix to add to the named of the saved files.
110
+
111
+ Returns:
112
+ `Tuple(str)`: Paths to the files saved.
113
+ """
114
+ if os.path.isdir(save_directory):
115
+ vocab_file = os.path.join(
116
+ save_directory, self.vocab_files_names["vocab_file"]
117
+ )
118
+ else:
119
+ vocab_file = save_directory
120
+
121
+ with open(self.vocab_file, 'rb') as fin:
122
+ proto_str = fin.read()
123
+
124
+ with open(vocab_file, "wb") as writer:
125
+ writer.write(proto_str)
126
+
127
+ return (vocab_file,)
128
+
129
+ def get_prefix_tokens(self):
130
+ prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("<sop>")]
131
+ return prefix_tokens
132
+
133
+ def build_single_message(self, role, metadata, message, tokenize=True):
134
+ assert role in ["system", "user", "assistant", "observation"], role
135
+ if tokenize:
136
+ role_tokens = [self.convert_tokens_to_ids(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n",
137
+ disallowed_special=())
138
+ message_tokens = self.tokenizer.encode(message, disallowed_special=())
139
+ tokens = role_tokens + message_tokens
140
+ return tokens
141
+ else:
142
+ return str(f"<|{role}|>{metadata}\n{message}")
143
+
144
+ # Use Jinja Template in tokenizer_config.json
145
+ # def apply_chat_template(
146
+ # self,
147
+ # conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]], "Conversation"],
148
+ # add_generation_prompt: bool = False,
149
+ # tokenize: bool = True,
150
+ # padding: bool = False,
151
+ # truncation: bool = False,
152
+ # max_length: Optional[int] = None,
153
+ # return_tensors: Optional[Union[str, TensorType]] = None,
154
+ # return_dict: bool = False,
155
+ # tokenizer_kwargs: Optional[Dict[str, Any]] = None,
156
+ # add_special_tokens: bool = True,
157
+ # **kwargs,
158
+ # ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
159
+ #
160
+ # if return_dict and not tokenize:
161
+ # raise ValueError(
162
+ # "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
163
+ # "of tokenizer outputs to return."
164
+ # )
165
+ #
166
+ # def handle_single_conversation(conversation):
167
+ # input_ids = self.get_prefix_tokens() if add_special_tokens else []
168
+ # input_message = "[gMASK]<sop>" if add_special_tokens else ""
169
+ # for item in conversation:
170
+ # if item.get("tools"):
171
+ # tools = item["tools"]
172
+ # content = "你是一个名为 GhatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。"
173
+ # content += "\n\n# 可用工具"
174
+ # for tool in tools:
175
+ # if tool["type"] == "function":
176
+ # function = tool["function"]
177
+ # content += f"\n\n## {function['name']}\n\n{json.dumps(function, ensure_ascii=False, indent=4)}"
178
+ # content += "\n在调用上述函数时,请使用 Json 格式表示调用的参数。"
179
+ # elif tool["type"] == "python":
180
+ # content += "\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。"
181
+ # elif tool["type"] == "simple_browser":
182
+ # content += "\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。"
183
+ # elif tool["type"] == "cogview":
184
+ # content += "\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。"
185
+ # else:
186
+ # raise NotImplementedError(f"Unknown tool type {tool['type']}")
187
+ # input = self.build_single_message("system", "", content, tokenize=tokenize)
188
+ # if tokenize:
189
+ # input_ids.extend(input)
190
+ # else:
191
+ # input_message += input
192
+ # if item["content"]:
193
+ # input = self.build_single_message(
194
+ # item["role"],
195
+ # item.get("metadata", ""),
196
+ # item["content"],
197
+ # tokenize=tokenize
198
+ # )
199
+ # if tokenize:
200
+ # input_ids.extend(input)
201
+ # else:
202
+ # input_message += input
203
+ # if add_generation_prompt:
204
+ # if tokenize:
205
+ # input_ids.extend([self.convert_tokens_to_ids("<|assistant|>")])
206
+ # else:
207
+ # input_message += "<|assistant|>"
208
+ # return input_ids if tokenize else input_message
209
+ #
210
+ # # Main logic to handle different conversation formats
211
+ # if isinstance(conversation, list) and all(isinstance(i, dict) for i in conversation):
212
+ # result = handle_single_conversation(conversation)
213
+ # elif isinstance(conversation, list) and all(isinstance(i, list) for i in conversation):
214
+ # result = [handle_single_conversation(c) for c in conversation]
215
+ # elif hasattr(conversation, "messages"):
216
+ # result = handle_single_conversation(conversation.messages)
217
+ # else:
218
+ # raise ValueError("Invalid conversation format")
219
+ #
220
+ # if tokenize:
221
+ # output = self.batch_encode_plus(
222
+ # [result] if isinstance(result[0], int) else result,
223
+ # padding=padding,
224
+ # truncation=truncation,
225
+ # max_length=max_length,
226
+ # return_tensors=return_tensors,
227
+ # is_split_into_words=True,
228
+ # add_special_tokens=False
229
+ # )
230
+ # if return_dict:
231
+ # return output
232
+ # else:
233
+ # return output["input_ids"]
234
+ # else:
235
+ # return result
236
+
237
+ def build_inputs_with_special_tokens(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
242
+ adding special tokens. A BERT sequence has the following format:
243
+
244
+ - single sequence: `[CLS] X [SEP]`
245
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
246
+
247
+ Args:
248
+ token_ids_0 (`List[int]`):
249
+ List of IDs to which the special tokens will be added.
250
+ token_ids_1 (`List[int]`, *optional*):
251
+ Optional second list of IDs for sequence pairs.
252
+
253
+ Returns:
254
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
255
+ """
256
+ prefix_tokens = self.get_prefix_tokens()
257
+ token_ids_0 = prefix_tokens + token_ids_0
258
+ if token_ids_1 is not None:
259
+ token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("<eos>")]
260
+ return token_ids_0
261
+
262
+ def _pad(
263
+ self,
264
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
265
+ max_length: Optional[int] = None,
266
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
267
+ pad_to_multiple_of: Optional[int] = None,
268
+ return_attention_mask: Optional[bool] = None,
269
+ ) -> dict:
270
+ """
271
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
272
+
273
+ Args:
274
+ encoded_inputs:
275
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
276
+ max_length: maximum length of the returned list and optionally padding length (see below).
277
+ Will truncate by taking into account the special tokens.
278
+ padding_strategy: PaddingStrategy to use for padding.
279
+
280
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
281
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
282
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
283
+ The tokenizer padding sides are defined in self.padding_side:
284
+
285
+ - 'left': pads on the left of the sequences
286
+ - 'right': pads on the right of the sequences
287
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
288
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
289
+ `>= 7.5` (Volta).
290
+ return_attention_mask:
291
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
292
+ """
293
+ # Load from model defaults
294
+ assert self.padding_side == "left"
295
+
296
+ required_input = encoded_inputs[self.model_input_names[0]]
297
+ seq_length = len(required_input)
298
+
299
+ if padding_strategy == PaddingStrategy.LONGEST:
300
+ max_length = len(required_input)
301
+
302
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
303
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
304
+
305
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
306
+
307
+ # Initialize attention mask if not present.
308
+ if "attention_mask" not in encoded_inputs:
309
+ encoded_inputs["attention_mask"] = [1] * seq_length
310
+
311
+ if "position_ids" not in encoded_inputs:
312
+ encoded_inputs["position_ids"] = list(range(seq_length))
313
+
314
+ if needs_to_be_padded:
315
+ difference = max_length - len(required_input)
316
+
317
+ if "attention_mask" in encoded_inputs:
318
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
319
+ if "position_ids" in encoded_inputs:
320
+ encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
321
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
322
+
323
+ return encoded_inputs
lora/sft/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a493598071550244b2ee7f26118f3edec2150b9dfa967929a99052ac83fe716
+ size 2623634
lora/sft/tokenizer_config.json ADDED
@@ -0,0 +1,148 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "151329": {
4
+ "content": "<|endoftext|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "151330": {
12
+ "content": "[MASK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "151331": {
20
+ "content": "[gMASK]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "151332": {
28
+ "content": "[sMASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "151333": {
36
+ "content": "<sop>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "151334": {
44
+ "content": "<eop>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "151335": {
52
+ "content": "<|system|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "151336": {
60
+ "content": "<|user|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "151337": {
68
+ "content": "<|assistant|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "151338": {
76
+ "content": "<|observation|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "151339": {
84
+ "content": "<|begin_of_image|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "151340": {
92
+ "content": "<|end_of_image|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "151341": {
100
+ "content": "<|begin_of_video|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "151342": {
108
+ "content": "<|end_of_video|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ }
115
+ },
116
+ "additional_special_tokens": [
117
+ "<|endoftext|>",
118
+ "[MASK]",
119
+ "[gMASK]",
120
+ "[sMASK]",
121
+ "<sop>",
122
+ "<eop>",
123
+ "<|system|>",
124
+ "<|user|>",
125
+ "<|assistant|>",
126
+ "<|observation|>",
127
+ "<|begin_of_image|>",
128
+ "<|end_of_image|>",
129
+ "<|begin_of_video|>",
130
+ "<|end_of_video|>"
131
+ ],
132
+ "auto_map": {
133
+ "AutoTokenizer": [
134
+ "tokenization_chatglm.ChatGLM4Tokenizer",
135
+ null
136
+ ]
137
+ },
138
+ "chat_template": "{{ '[gMASK]<sop>' }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|system|>\n' + system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + content + '<|assistant|>' }}{% elif message['role'] == 'assistant' %}{{ '\n' + content }}{% endif %}{% endfor %}",
139
+ "clean_up_tokenization_spaces": false,
140
+ "do_lower_case": false,
141
+ "eos_token": "<|endoftext|>",
142
+ "model_max_length": 128000,
143
+ "pad_token": "<|endoftext|>",
144
+ "padding_side": "right",
145
+ "remove_space": false,
146
+ "split_special_tokens": false,
147
+ "tokenizer_class": "ChatGLM4Tokenizer"
148
+ }
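The `chat_template` above is a Jinja template that prepends `[gMASK]<sop>`, renders an optional `<|system|>` block, and closes every user turn with `<|assistant|>`. A sketch of rendering it through `apply_chat_template`, assuming the `lora/sft` folder is available locally and using an arbitrary example conversation:

```python
# Sketch: render the Jinja chat_template from tokenizer_config.json to a prompt string.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("lora/sft", trust_remote_code=True)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize LoRA in one sentence."},
]
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
# Expected shape of the rendered prompt (per the template):
# [gMASK]<sop><|system|>
# You are a helpful assistant.<|user|>
# Summarize LoRA in one sentence.<|assistant|>
```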
lora/sft/train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 0.9955555555555555,
+ "total_flos": 1.2980476361834496e+16,
+ "train_loss": 1.7745797293526786,
+ "train_runtime": 341.4533,
+ "train_samples_per_second": 2.636,
+ "train_steps_per_second": 0.328
+ }
lora/sft/trainer_log.jsonl ADDED
@@ -0,0 +1,12 @@
+ {"current_steps": 10, "total_steps": 112, "loss": 2.4521, "learning_rate": 8.333333333333334e-06, "epoch": 0.08888888888888889, "percentage": 8.93, "elapsed_time": "0:00:32", "remaining_time": "0:05:29", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 20, "total_steps": 112, "loss": 2.1638, "learning_rate": 9.842915805643156e-06, "epoch": 0.17777777777777778, "percentage": 17.86, "elapsed_time": "0:01:01", "remaining_time": "0:04:44", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 30, "total_steps": 112, "loss": 1.9103, "learning_rate": 9.221639627510076e-06, "epoch": 0.26666666666666666, "percentage": 26.79, "elapsed_time": "0:01:31", "remaining_time": "0:04:09", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 40, "total_steps": 112, "loss": 1.7952, "learning_rate": 8.18711994874345e-06, "epoch": 0.35555555555555557, "percentage": 35.71, "elapsed_time": "0:02:00", "remaining_time": "0:03:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 50, "total_steps": 112, "loss": 1.6722, "learning_rate": 6.840622763423391e-06, "epoch": 0.4444444444444444, "percentage": 44.64, "elapsed_time": "0:02:31", "remaining_time": "0:03:08", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 60, "total_steps": 112, "loss": 1.6154, "learning_rate": 5.3139525976465675e-06, "epoch": 0.5333333333333333, "percentage": 53.57, "elapsed_time": "0:03:01", "remaining_time": "0:02:37", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 70, "total_steps": 112, "loss": 1.6036, "learning_rate": 3.756550564175727e-06, "epoch": 0.6222222222222222, "percentage": 62.5, "elapsed_time": "0:03:31", "remaining_time": "0:02:07", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 80, "total_steps": 112, "loss": 1.5899, "learning_rate": 2.320866025105016e-06, "epoch": 0.7111111111111111, "percentage": 71.43, "elapsed_time": "0:04:01", "remaining_time": "0:01:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 90, "total_steps": 112, "loss": 1.6193, "learning_rate": 1.1474337861210543e-06, "epoch": 0.8, "percentage": 80.36, "elapsed_time": "0:04:32", "remaining_time": "0:01:06", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 100, "total_steps": 112, "loss": 1.569, "learning_rate": 3.511175705587433e-07, "epoch": 0.8888888888888888, "percentage": 89.29, "elapsed_time": "0:05:03", "remaining_time": "0:00:36", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 110, "total_steps": 112, "loss": 1.5909, "learning_rate": 9.866357858642206e-09, "epoch": 0.9777777777777777, "percentage": 98.21, "elapsed_time": "0:05:33", "remaining_time": "0:00:06", "throughput": "0.00", "total_tokens": 0}
+ {"current_steps": 112, "total_steps": 112, "epoch": 0.9955555555555555, "percentage": 100.0, "elapsed_time": "0:05:41", "remaining_time": "0:00:00", "throughput": "0.00", "total_tokens": 0}
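Each line of the log is a self-contained JSON record; the final record summarizes the run and carries no `loss` field. A sketch of reading the log to re-plot the loss curve (presumably what `training_loss.png` shows, though that is an assumption), using `matplotlib`:

```python
# Sketch: parse trainer_log.jsonl and re-plot training loss vs. step.
# Assumes matplotlib is installed; the output file name is an arbitrary choice.
import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("lora/sft/trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:            # the final summary record has no "loss" field
            steps.append(record["current_steps"])
            losses.append(record["loss"])

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_replot.png")
```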
lora/sft/trainer_state.json ADDED
@@ -0,0 +1,119 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9955555555555555,
5
+ "eval_steps": 500,
6
+ "global_step": 112,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.08888888888888889,
13
+ "grad_norm": 7.216278553009033,
14
+ "learning_rate": 8.333333333333334e-06,
15
+ "loss": 2.4521,
16
+ "step": 10
17
+ },
18
+ {
19
+ "epoch": 0.17777777777777778,
20
+ "grad_norm": 5.572214603424072,
21
+ "learning_rate": 9.842915805643156e-06,
22
+ "loss": 2.1638,
23
+ "step": 20
24
+ },
25
+ {
26
+ "epoch": 0.26666666666666666,
27
+ "grad_norm": 2.498288631439209,
28
+ "learning_rate": 9.221639627510076e-06,
29
+ "loss": 1.9103,
30
+ "step": 30
31
+ },
32
+ {
33
+ "epoch": 0.35555555555555557,
34
+ "grad_norm": 3.3242993354797363,
35
+ "learning_rate": 8.18711994874345e-06,
36
+ "loss": 1.7952,
37
+ "step": 40
38
+ },
39
+ {
40
+ "epoch": 0.4444444444444444,
41
+ "grad_norm": 2.2792470455169678,
42
+ "learning_rate": 6.840622763423391e-06,
43
+ "loss": 1.6722,
44
+ "step": 50
45
+ },
46
+ {
47
+ "epoch": 0.5333333333333333,
48
+ "grad_norm": 2.598036289215088,
49
+ "learning_rate": 5.3139525976465675e-06,
50
+ "loss": 1.6154,
51
+ "step": 60
52
+ },
53
+ {
54
+ "epoch": 0.6222222222222222,
55
+ "grad_norm": 2.531435489654541,
56
+ "learning_rate": 3.756550564175727e-06,
57
+ "loss": 1.6036,
58
+ "step": 70
59
+ },
60
+ {
61
+ "epoch": 0.7111111111111111,
62
+ "grad_norm": 2.560697317123413,
63
+ "learning_rate": 2.320866025105016e-06,
64
+ "loss": 1.5899,
65
+ "step": 80
66
+ },
67
+ {
68
+ "epoch": 0.8,
69
+ "grad_norm": 2.3769493103027344,
70
+ "learning_rate": 1.1474337861210543e-06,
71
+ "loss": 1.6193,
72
+ "step": 90
73
+ },
74
+ {
75
+ "epoch": 0.8888888888888888,
76
+ "grad_norm": 2.1420528888702393,
77
+ "learning_rate": 3.511175705587433e-07,
78
+ "loss": 1.569,
79
+ "step": 100
80
+ },
81
+ {
82
+ "epoch": 0.9777777777777777,
83
+ "grad_norm": 1.8092883825302124,
84
+ "learning_rate": 9.866357858642206e-09,
85
+ "loss": 1.5909,
86
+ "step": 110
87
+ },
88
+ {
89
+ "epoch": 0.9955555555555555,
90
+ "step": 112,
91
+ "total_flos": 1.2980476361834496e+16,
92
+ "train_loss": 1.7745797293526786,
93
+ "train_runtime": 341.4533,
94
+ "train_samples_per_second": 2.636,
95
+ "train_steps_per_second": 0.328
96
+ }
97
+ ],
98
+ "logging_steps": 10,
99
+ "max_steps": 112,
100
+ "num_input_tokens_seen": 0,
101
+ "num_train_epochs": 1,
102
+ "save_steps": 500,
103
+ "stateful_callbacks": {
104
+ "TrainerControl": {
105
+ "args": {
106
+ "should_epoch_stop": false,
107
+ "should_evaluate": false,
108
+ "should_log": false,
109
+ "should_save": true,
110
+ "should_training_stop": true
111
+ },
112
+ "attributes": {}
113
+ }
114
+ },
115
+ "total_flos": 1.2980476361834496e+16,
116
+ "train_batch_size": 1,
117
+ "trial_name": null,
118
+ "trial_params": null
119
+ }
lora/sft/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a91a4ed932e171381acdd219106313852fe2612831e5273940b143e95ee3af6f
+ size 5304
lora/sft/training_loss.png ADDED