sci-m-wang committed · Commit eb518e3 (verified) · 1 Parent(s): cb46c10

Upload 15 files
README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: THUDM/chatglm3-6b
+ model-index:
+ - name: LangGPT
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # LangGPT
+
+ This model is a fine-tuned version of [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) on the LangGPT dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.8991
+
+ ## Model description
+
+ A LoRA adapter (r=8, lora_alpha=16, target module `query_key_value`) for THUDM/chatglm3-6b, trained with LLaMA-Factory.
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 12
+ - eval_batch_size: 4
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 96
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 20
+ - num_epochs: 9.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.0558        | 1.25  | 100  | 1.0500          |
+ | 0.9566        | 2.5   | 200  | 0.9630          |
+ | 0.9082        | 3.75  | 300  | 0.9288          |
+ | 0.8992        | 5.0   | 400  | 0.9108          |
+ | 0.8874        | 6.25  | 500  | 0.9028          |
+ | 0.8835        | 7.5   | 600  | 0.8997          |
+ | 0.8912        | 8.75  | 700  | 0.8991          |
+
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.40.1
+ - Pytorch 2.2.0+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.19.1
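
Since the usage sections in the card are still placeholders, here is a minimal loading sketch. The adapter repo id `sci-m-wang/LangGPT` is an assumption based on this upload; substitute the actual repo id if it differs, and note that loading ChatGLM3 requires `trust_remote_code=True`.

```python
# Minimal sketch: apply the LoRA adapter to the chatglm3-6b base model.
# "sci-m-wang/LangGPT" is a placeholder repo id -- substitute the real one.
from transformers import AutoModel, AutoTokenizer
from peft import PeftModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)
base = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True).half().cuda()
model = PeftModel.from_pretrained(base, "sci-m-wang/LangGPT")  # injects the r=8 LoRA weights

# ChatGLM3's custom chat() helper is still reachable through the PEFT wrapper.
response, history = model.chat(tokenizer, "Write a structured prompt for a translation assistant.", history=[])
print(response)
```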
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "THUDM/chatglm3-6b",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "query_key_value"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
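
For orientation, the key settings in this file map onto a `peft.LoraConfig` roughly as below. This is a sketch of the equivalent configuration, not the exact invocation: the original file was generated by LLaMA-Factory.

```python
from peft import LoraConfig

# Rough equivalent of adapter_config.json above.
lora_config = LoraConfig(
    r=8,                                 # LoRA rank
    lora_alpha=16,                       # scaling factor: alpha / r = 2.0
    lora_dropout=0.0,
    bias="none",
    target_modules=["query_key_value"],  # ChatGLM3 fuses Q/K/V into one projection
    task_type="CAUSAL_LM",
)
```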
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:404f0a92a53d60d0eb9cfabc139cc99740bb4ff315929bf64fcccd2fead20436
+ size 7807744
all_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 9.0,
+   "eval_loss": 0.8991448283195496,
+   "eval_runtime": 353.2745,
+   "eval_samples_per_second": 2.417,
+   "eval_steps_per_second": 0.606,
+   "total_flos": 2.5580424283828716e+18,
+   "train_loss": 0.9693152533637153,
+   "train_runtime": 60193.9387,
+   "train_samples_per_second": 1.148,
+   "train_steps_per_second": 0.012
+ }
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 9.0,
+   "eval_loss": 0.8991448283195496,
+   "eval_runtime": 353.2745,
+   "eval_samples_per_second": 2.417,
+   "eval_steps_per_second": 0.606
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|observation|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ]
+ }
tokenization_chatglm.py ADDED
@@ -0,0 +1,328 @@
+ import json
+ import os
+ import re
+ from typing import List, Optional, Union, Dict
+ from sentencepiece import SentencePieceProcessor
+ from transformers import PreTrainedTokenizer
+ from transformers.utils import logging, PaddingStrategy
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class SPTokenizer:
+     def __init__(self, model_path: str):
+         # reload tokenizer
+         assert os.path.isfile(model_path), model_path
+         self.sp_model = SentencePieceProcessor(model_file=model_path)
+
+         # BOS / EOS token IDs
+         self.n_words: int = self.sp_model.vocab_size()
+         self.bos_id: int = self.sp_model.bos_id()
+         self.eos_id: int = self.sp_model.eos_id()
+         self.pad_id: int = self.sp_model.unk_id()
+         assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
+
+         role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
+         special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
+         self.special_tokens = {}
+         self.index_special_tokens = {}
+         for token in special_tokens:
+             self.special_tokens[token] = self.n_words
+             self.index_special_tokens[self.n_words] = token
+             self.n_words += 1
+         self.role_special_token_expression = "|".join([re.escape(token) for token in special_tokens])  # for apply_chat_template
+
+     def tokenize(self, s: str, encode_special_tokens=False):
+         if encode_special_tokens:
+             last_index = 0
+             t = []
+             for match in re.finditer(self.role_special_token_expression, s):
+                 if last_index < match.start():
+                     t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()]))
+                 t.append(s[match.start():match.end()])
+                 last_index = match.end()
+             if last_index < len(s):
+                 t.extend(self.sp_model.EncodeAsPieces(s[last_index:]))
+             return t
+         else:
+             return self.sp_model.EncodeAsPieces(s)
+
+     def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
+         assert type(s) is str
+         t = self.sp_model.encode(s)
+         if bos:
+             t = [self.bos_id] + t
+         if eos:
+             t = t + [self.eos_id]
+         return t
+
+     def decode(self, t: List[int]) -> str:
+         text, buffer = "", []
+         for token in t:
+             if token in self.index_special_tokens:
+                 if buffer:
+                     text += self.sp_model.decode(buffer)
+                     buffer = []
+                 text += self.index_special_tokens[token]
+             else:
+                 buffer.append(token)
+         if buffer:
+             text += self.sp_model.decode(buffer)
+         return text
+
+     def decode_tokens(self, tokens: List[str]) -> str:
+         text = self.sp_model.DecodePieces(tokens)
+         return text
+
+     def convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         return self.sp_model.PieceToId(token)
+
+     def convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         if index in self.index_special_tokens:
+             return self.index_special_tokens[index]
+         if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size():
+             return ""
+         return self.sp_model.IdToPiece(index)
+
+
+ class ChatGLMTokenizer(PreTrainedTokenizer):
+
+     vocab_files_names = {"vocab_file": "tokenizer.model"}
+     model_input_names = ["input_ids", "attention_mask", "position_ids"]
+
+     def __init__(
+         self,
+         vocab_file,
+         padding_side="left",
+         clean_up_tokenization_spaces=False,
+         encode_special_tokens=False,
+         **kwargs
+     ):
+         self.name = "GLMTokenizer"
+         self.vocab_file = vocab_file
+         self.tokenizer = SPTokenizer(vocab_file)
+         self.special_tokens = {
+             "<bos>": self.tokenizer.bos_id,
+             "<eos>": self.tokenizer.eos_id,
+             "<unk>": self.tokenizer.pad_id,
+             "<pad>": self.tokenizer.pad_id
+         }
+         self.encode_special_tokens = encode_special_tokens
+
+         super().__init__(
+             padding_side=padding_side,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs
+         )
+
+     def get_command(self, token):
+         if token in self.special_tokens:
+             return self.special_tokens[token]
+         assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
+         return self.tokenizer.special_tokens[token]
+
+     @property
+     def unk_token(self) -> str:
+         return self.tokenizer.sp_model.IdToPiece(self.get_command("<unk>"))
+
+     @property
+     def pad_token(self) -> str:
+         return self.tokenizer.sp_model.IdToPiece(self.get_command("<pad>"))
+
+     @property
+     def eos_token(self) -> str:
+         return self.tokenizer.sp_model.IdToPiece(self.get_command("<eos>"))
+
+     @property
+     def unk_token_id(self) -> int:
+         return self.get_command("<unk>")
+
+     @property
+     def pad_token_id(self) -> int:
+         return self.get_command("<pad>")
+
+     @property
+     def eos_token_id(self):
+         return self.get_command("<eos>")
+
+     @unk_token.setter
+     def unk_token(self, value):
+         logger.warning("Setting unk_token is not supported, use the default one.")
+
+     @pad_token.setter
+     def pad_token(self, value):
+         logger.warning("Setting pad_token is not supported, use the default one.")
+
+     @eos_token.setter
+     def eos_token(self, value):
+         logger.warning("Setting eos_token is not supported, use the default one.")
+
+     @property
+     def vocab_size(self):
+         return self.tokenizer.n_words
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text, **kwargs):
+         return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.tokenizer.convert_token_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         return self.tokenizer.convert_id_to_token(index)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         return self.tokenizer.decode_tokens(tokens)
+
+     def save_vocabulary(self, save_directory, filename_prefix=None):
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+             filename_prefix (`str`, *optional*):
+                 An optional prefix to add to the names of the saved files.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if os.path.isdir(save_directory):
+             vocab_file = os.path.join(
+                 save_directory, self.vocab_files_names["vocab_file"]
+             )
+         else:
+             vocab_file = save_directory
+
+         with open(self.vocab_file, 'rb') as fin:
+             proto_str = fin.read()
+
+         with open(vocab_file, "wb") as writer:
+             writer.write(proto_str)
+
+         return (vocab_file,)
+
+     def get_prefix_tokens(self):
+         prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")]
+         return prefix_tokens
+
+     def build_single_message(self, role, metadata, message):
+         assert role in ["system", "user", "assistant", "observation"], role
+         role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n")
+         message_tokens = self.tokenizer.encode(message)
+         tokens = role_tokens + message_tokens
+         return tokens
+
+     def build_chat_input(self, query, history=None, role="user"):
+         if history is None:
+             history = []
+         input_ids = []
+         for item in history:
+             content = item["content"]
+             if item["role"] == "system" and "tools" in item:
+                 content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False)
+             input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content))
+         input_ids.extend(self.build_single_message(role, "", query))
+         input_ids.extend([self.get_command("<|assistant|>")])
+         return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True)
+
+     def build_inputs_with_special_tokens(
+             self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences by concatenating and adding special tokens.
+         A ChatGLM sequence has the following format:
+
+         - single sequence: `[gMASK] sop X`
+         - pair of sequences: `[gMASK] sop A B <eos>`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+         prefix_tokens = self.get_prefix_tokens()
+         token_ids_0 = prefix_tokens + token_ids_0
+         if token_ids_1 is not None:
+             token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")]
+         return token_ids_0
+
+     def _pad(
+             self,
+             encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+             max_length: Optional[int] = None,
+             padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+             pad_to_multiple_of: Optional[int] = None,
+             return_attention_mask: Optional[bool] = None,
+     ) -> dict:
+         """
+         Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+         Args:
+             encoded_inputs:
+                 Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+             max_length: maximum length of the returned list and optionally padding length (see below).
+                 Will truncate by taking into account the special tokens.
+             padding_strategy: PaddingStrategy to use for padding.
+
+                 - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+                 - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+                 - PaddingStrategy.DO_NOT_PAD: Do not pad
+                 The tokenizer padding sides are defined in self.padding_side:
+
+                     - 'left': pads on the left of the sequences
+                     - 'right': pads on the right of the sequences
+             pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+                 This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+                 `>= 7.5` (Volta).
+             return_attention_mask:
+                 (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+         """
+         # Load from model defaults
+         assert self.padding_side == "left"
+
+         required_input = encoded_inputs[self.model_input_names[0]]
+         seq_length = len(required_input)
+
+         if padding_strategy == PaddingStrategy.LONGEST:
+             max_length = len(required_input)
+
+         if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+             max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+         needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+         # Initialize attention mask if not present.
+         if "attention_mask" not in encoded_inputs:
+             encoded_inputs["attention_mask"] = [1] * seq_length
+
+         if "position_ids" not in encoded_inputs:
+             encoded_inputs["position_ids"] = list(range(seq_length))
+
+         if needs_to_be_padded:
+             difference = max_length - len(required_input)
+
+             if "attention_mask" in encoded_inputs:
+                 encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+             if "position_ids" in encoded_inputs:
+                 encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
+             encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+
+         return encoded_inputs
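
A short usage sketch for the tokenizer above (assuming it is loaded with `trust_remote_code=True`, which resolves `tokenization_chatglm.ChatGLMTokenizer` through the `auto_map` entry in tokenizer_config.json): `build_chat_input` prepends the `[gMASK] sop` prefix tokens and the `<|role|>` markers, then appends `<|assistant|>` as the generation cue.

```python
from transformers import AutoTokenizer

# Sketch: exercise the custom tokenizer's chat-building path.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)

history = [{"role": "system", "content": "You are a helpful assistant."}]
inputs = tokenizer.build_chat_input("Hello!", history=history, role="user")

print(inputs["input_ids"].shape)  # (1, sequence_length)
print(tokenizer.decode(inputs["input_ids"][0].tolist()))  # round-trips role markers
```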
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7dc4c393423b76e4373e5157ddc34803a0189ba96b21ddbb40269d31468a6f2
+ size 1018370
tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "added_tokens_decoder": {
+     "64790": {
+       "content": "[gMASK]",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "64792": {
+       "content": "sop",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "64795": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64796": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "64797": {
+       "content": "<|observation|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|user|>",
+     "<|observation|>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_chatglm.ChatGLMTokenizer",
+       null
+     ]
+   },
+   "chat_template": "{% for message in messages %}{% if loop.first %}[gMASK]sop<|{{ message['role'] }}|>\n {{ message['content'] }}{% else %}<|{{ message['role'] }}|>\n {{ message['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<unk>",
+   "padding_side": "right",
+   "remove_space": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "ChatGLMTokenizer",
+   "unk_token": "<unk>"
+ }
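
The `chat_template` above also lets the standard `transformers` API render a conversation without calling `build_chat_input` directly; a minimal sketch:

```python
from transformers import AutoTokenizer

# Sketch: render a conversation with the Jinja chat_template defined above.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)

messages = [{"role": "user", "content": "Summarize LoRA in one sentence."}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)  # "[gMASK]sop<|user|>\n Summarize LoRA in one sentence.<|assistant|>"
```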
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 9.0,
+   "total_flos": 2.5580424283828716e+18,
+   "train_loss": 0.9693152533637153,
+   "train_runtime": 60193.9387,
+   "train_samples_per_second": 1.148,
+   "train_steps_per_second": 0.012
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,81 @@
+ {"current_steps": 10, "total_steps": 720, "loss": 1.6827, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.5e-05, "epoch": 0.125, "percentage": 1.39, "elapsed_time": "0:13:23", "remaining_time": "15:50:19"}
+ {"current_steps": 20, "total_steps": 720, "loss": 1.6309, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 5e-05, "epoch": 0.25, "percentage": 2.78, "elapsed_time": "0:26:45", "remaining_time": "15:36:27"}
+ {"current_steps": 30, "total_steps": 720, "loss": 1.5415, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.997482666353287e-05, "epoch": 0.375, "percentage": 4.17, "elapsed_time": "0:40:07", "remaining_time": "15:22:55"}
+ {"current_steps": 40, "total_steps": 720, "loss": 1.393, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.989935734988098e-05, "epoch": 0.5, "percentage": 5.56, "elapsed_time": "0:53:29", "remaining_time": "15:09:26"}
+ {"current_steps": 50, "total_steps": 720, "loss": 1.2563, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.977374404419837e-05, "epoch": 0.625, "percentage": 6.94, "elapsed_time": "1:06:51", "remaining_time": "14:55:59"}
+ {"current_steps": 60, "total_steps": 720, "loss": 1.1963, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.959823971496574e-05, "epoch": 0.75, "percentage": 8.33, "elapsed_time": "1:20:14", "remaining_time": "14:42:34"}
+ {"current_steps": 70, "total_steps": 720, "loss": 1.1385, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.937319780454559e-05, "epoch": 0.875, "percentage": 9.72, "elapsed_time": "1:33:36", "remaining_time": "14:29:08"}
+ {"current_steps": 80, "total_steps": 720, "loss": 1.1085, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.909907151739633e-05, "epoch": 1.0, "percentage": 11.11, "elapsed_time": "1:46:56", "remaining_time": "14:15:30"}
+ {"current_steps": 90, "total_steps": 720, "loss": 1.1025, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.877641290737884e-05, "epoch": 1.125, "percentage": 12.5, "elapsed_time": "2:00:18", "remaining_time": "14:02:06"}
+ {"current_steps": 100, "total_steps": 720, "loss": 1.0558, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.8405871765993433e-05, "epoch": 1.25, "percentage": 13.89, "elapsed_time": "2:13:40", "remaining_time": "13:48:45"}
+ {"current_steps": 100, "total_steps": 720, "loss": null, "eval_loss": 1.0500283241271973, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 1.25, "percentage": 13.89, "elapsed_time": "2:13:40", "remaining_time": "13:48:45"}
+ {"current_steps": 110, "total_steps": 720, "loss": 1.0258, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.7988194313786275e-05, "epoch": 1.375, "percentage": 15.28, "elapsed_time": "2:32:55", "remaining_time": "14:07:59"}
+ {"current_steps": 120, "total_steps": 720, "loss": 1.0261, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.752422169756048e-05, "epoch": 1.5, "percentage": 16.67, "elapsed_time": "2:46:16", "remaining_time": "13:51:24"}
+ {"current_steps": 130, "total_steps": 720, "loss": 0.9923, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.701488829641845e-05, "epoch": 1.625, "percentage": 18.06, "elapsed_time": "2:59:38", "remaining_time": "13:35:18"}
+ {"current_steps": 140, "total_steps": 720, "loss": 0.9835, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.6461219840046654e-05, "epoch": 1.75, "percentage": 19.44, "elapsed_time": "3:13:00", "remaining_time": "13:19:35"}
+ {"current_steps": 150, "total_steps": 720, "loss": 1.0039, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.586433134303257e-05, "epoch": 1.875, "percentage": 20.83, "elapsed_time": "3:26:22", "remaining_time": "13:04:12"}
+ {"current_steps": 160, "total_steps": 720, "loss": 0.9947, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.522542485937369e-05, "epoch": 2.0, "percentage": 22.22, "elapsed_time": "3:39:42", "remaining_time": "12:48:58"}
+ {"current_steps": 170, "total_steps": 720, "loss": 0.9821, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.454578706170075e-05, "epoch": 2.125, "percentage": 23.61, "elapsed_time": "3:53:04", "remaining_time": "12:34:03"}
+ {"current_steps": 180, "total_steps": 720, "loss": 0.9535, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.382678665009028e-05, "epoch": 2.25, "percentage": 25.0, "elapsed_time": "4:06:26", "remaining_time": "12:19:18"}
+ {"current_steps": 190, "total_steps": 720, "loss": 0.9514, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.306987159568479e-05, "epoch": 2.375, "percentage": 26.39, "elapsed_time": "4:19:47", "remaining_time": "12:04:42"}
+ {"current_steps": 200, "total_steps": 720, "loss": 0.9566, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.227656622467162e-05, "epoch": 2.5, "percentage": 27.78, "elapsed_time": "4:33:09", "remaining_time": "11:50:13"}
+ {"current_steps": 200, "total_steps": 720, "loss": null, "eval_loss": 0.9630343914031982, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 2.5, "percentage": 27.78, "elapsed_time": "4:33:09", "remaining_time": "11:50:13"}
+ {"current_steps": 210, "total_steps": 720, "loss": 0.9655, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.144846814849282e-05, "epoch": 2.625, "percentage": 29.17, "elapsed_time": "4:52:25", "remaining_time": "11:50:09"}
+ {"current_steps": 220, "total_steps": 720, "loss": 0.9537, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.058724504646834e-05, "epoch": 2.75, "percentage": 30.56, "elapsed_time": "5:05:46", "remaining_time": "11:34:56"}
+ {"current_steps": 230, "total_steps": 720, "loss": 0.951, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.969463130731183e-05, "epoch": 2.875, "percentage": 31.94, "elapsed_time": "5:19:08", "remaining_time": "11:19:54"}
+ {"current_steps": 240, "total_steps": 720, "loss": 0.938, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.8772424536302564e-05, "epoch": 3.0, "percentage": 33.33, "elapsed_time": "5:32:28", "remaining_time": "11:04:57"}
+ {"current_steps": 250, "total_steps": 720, "loss": 0.955, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.782248193514766e-05, "epoch": 3.125, "percentage": 34.72, "elapsed_time": "5:45:50", "remaining_time": "10:50:10"}
+ {"current_steps": 260, "total_steps": 720, "loss": 0.9319, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.6846716561824965e-05, "epoch": 3.25, "percentage": 36.11, "elapsed_time": "5:59:11", "remaining_time": "10:35:30"}
+ {"current_steps": 270, "total_steps": 720, "loss": 0.9385, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.5847093477938956e-05, "epoch": 3.375, "percentage": 37.5, "elapsed_time": "6:12:33", "remaining_time": "10:20:55"}
+ {"current_steps": 280, "total_steps": 720, "loss": 0.911, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.4825625791348096e-05, "epoch": 3.5, "percentage": 38.89, "elapsed_time": "6:25:54", "remaining_time": "10:06:26"}
+ {"current_steps": 290, "total_steps": 720, "loss": 0.9366, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.378437060203357e-05, "epoch": 3.625, "percentage": 40.28, "elapsed_time": "6:39:17", "remaining_time": "9:52:02"}
+ {"current_steps": 300, "total_steps": 720, "loss": 0.9082, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.272542485937369e-05, "epoch": 3.75, "percentage": 41.67, "elapsed_time": "6:52:38", "remaining_time": "9:37:42"}
+ {"current_steps": 300, "total_steps": 720, "loss": null, "eval_loss": 0.928753137588501, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 3.75, "percentage": 41.67, "elapsed_time": "6:52:38", "remaining_time": "9:37:42"}
+ {"current_steps": 310, "total_steps": 720, "loss": 0.9158, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.165092113916688e-05, "epoch": 3.875, "percentage": 43.06, "elapsed_time": "7:11:54", "remaining_time": "9:31:13"}
+ {"current_steps": 320, "total_steps": 720, "loss": 0.9027, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.056302334890786e-05, "epoch": 4.0, "percentage": 44.44, "elapsed_time": "7:25:14", "remaining_time": "9:16:32"}
+ {"current_steps": 330, "total_steps": 720, "loss": 0.9336, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.9463922369965917e-05, "epoch": 4.125, "percentage": 45.83, "elapsed_time": "7:38:36", "remaining_time": "9:01:59"}
+ {"current_steps": 340, "total_steps": 720, "loss": 0.9161, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.8355831645441388e-05, "epoch": 4.25, "percentage": 47.22, "elapsed_time": "7:51:58", "remaining_time": "8:47:30"}
+ {"current_steps": 350, "total_steps": 720, "loss": 0.8966, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.724098272258584e-05, "epoch": 4.375, "percentage": 48.61, "elapsed_time": "8:05:20", "remaining_time": "8:33:04"}
+ {"current_steps": 360, "total_steps": 720, "loss": 0.8954, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.6121620758762877e-05, "epoch": 4.5, "percentage": 50.0, "elapsed_time": "8:18:42", "remaining_time": "8:18:42"}
+ {"current_steps": 370, "total_steps": 720, "loss": 0.8815, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.5e-05, "epoch": 4.625, "percentage": 51.39, "elapsed_time": "8:32:04", "remaining_time": "8:04:24"}
+ {"current_steps": 380, "total_steps": 720, "loss": 0.89, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.3878379241237136e-05, "epoch": 4.75, "percentage": 52.78, "elapsed_time": "8:45:26", "remaining_time": "7:50:08"}
+ {"current_steps": 390, "total_steps": 720, "loss": 0.9196, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.2759017277414166e-05, "epoch": 4.875, "percentage": 54.17, "elapsed_time": "8:58:48", "remaining_time": "7:35:55"}
+ {"current_steps": 400, "total_steps": 720, "loss": 0.8992, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.164416835455862e-05, "epoch": 5.0, "percentage": 55.56, "elapsed_time": "9:12:08", "remaining_time": "7:21:43"}
+ {"current_steps": 400, "total_steps": 720, "loss": null, "eval_loss": 0.9107962846755981, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 5.0, "percentage": 55.56, "elapsed_time": "9:12:08", "remaining_time": "7:21:43"}
+ {"current_steps": 410, "total_steps": 720, "loss": 0.888, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.0536077630034086e-05, "epoch": 5.125, "percentage": 56.94, "elapsed_time": "9:31:24", "remaining_time": "7:12:02"}
+ {"current_steps": 420, "total_steps": 720, "loss": 0.8901, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.9436976651092144e-05, "epoch": 5.25, "percentage": 58.33, "elapsed_time": "9:44:46", "remaining_time": "6:57:41"}
+ {"current_steps": 430, "total_steps": 720, "loss": 0.9147, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.8349078860833123e-05, "epoch": 5.375, "percentage": 59.72, "elapsed_time": "9:58:08", "remaining_time": "6:43:23"}
+ {"current_steps": 440, "total_steps": 720, "loss": 0.8925, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.7274575140626318e-05, "epoch": 5.5, "percentage": 61.11, "elapsed_time": "10:11:29", "remaining_time": "6:29:08"}
+ {"current_steps": 450, "total_steps": 720, "loss": 0.9012, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.621562939796643e-05, "epoch": 5.625, "percentage": 62.5, "elapsed_time": "10:24:51", "remaining_time": "6:14:55"}
+ {"current_steps": 460, "total_steps": 720, "loss": 0.8808, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.5174374208651912e-05, "epoch": 5.75, "percentage": 63.89, "elapsed_time": "10:38:13", "remaining_time": "6:00:44"}
+ {"current_steps": 470, "total_steps": 720, "loss": 0.8816, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.4152906522061048e-05, "epoch": 5.875, "percentage": 65.28, "elapsed_time": "10:51:35", "remaining_time": "5:46:35"}
+ {"current_steps": 480, "total_steps": 720, "loss": 0.8941, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.3153283438175034e-05, "epoch": 6.0, "percentage": 66.67, "elapsed_time": "11:04:55", "remaining_time": "5:32:27"}
+ {"current_steps": 490, "total_steps": 720, "loss": 0.9048, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.217751806485235e-05, "epoch": 6.125, "percentage": 68.06, "elapsed_time": "11:18:17", "remaining_time": "5:18:22"}
+ {"current_steps": 500, "total_steps": 720, "loss": 0.8874, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.122757546369744e-05, "epoch": 6.25, "percentage": 69.44, "elapsed_time": "11:31:39", "remaining_time": "5:04:19"}
+ {"current_steps": 500, "total_steps": 720, "loss": null, "eval_loss": 0.9028034806251526, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 6.25, "percentage": 69.44, "elapsed_time": "11:31:39", "remaining_time": "5:04:19"}
+ {"current_steps": 510, "total_steps": 720, "loss": 0.8738, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.0305368692688174e-05, "epoch": 6.375, "percentage": 70.83, "elapsed_time": "11:50:54", "remaining_time": "4:52:43"}
+ {"current_steps": 520, "total_steps": 720, "loss": 0.8951, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 9.412754953531663e-06, "epoch": 6.5, "percentage": 72.22, "elapsed_time": "12:04:16", "remaining_time": "4:38:33"}
+ {"current_steps": 530, "total_steps": 720, "loss": 0.8914, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 8.551531851507186e-06, "epoch": 6.625, "percentage": 73.61, "elapsed_time": "12:17:37", "remaining_time": "4:24:26"}
+ {"current_steps": 540, "total_steps": 720, "loss": 0.8818, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 7.723433775328384e-06, "epoch": 6.75, "percentage": 75.0, "elapsed_time": "12:30:59", "remaining_time": "4:10:19"}
+ {"current_steps": 550, "total_steps": 720, "loss": 0.8794, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 6.930128404315214e-06, "epoch": 6.875, "percentage": 76.39, "elapsed_time": "12:44:21", "remaining_time": "3:56:15"}
+ {"current_steps": 560, "total_steps": 720, "loss": 0.8814, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 6.173213349909729e-06, "epoch": 7.0, "percentage": 77.78, "elapsed_time": "12:57:41", "remaining_time": "3:42:11"}
+ {"current_steps": 570, "total_steps": 720, "loss": 0.8909, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 5.454212938299255e-06, "epoch": 7.125, "percentage": 79.17, "elapsed_time": "13:11:03", "remaining_time": "3:28:10"}
+ {"current_steps": 580, "total_steps": 720, "loss": 0.8737, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.7745751406263165e-06, "epoch": 7.25, "percentage": 80.56, "elapsed_time": "13:24:25", "remaining_time": "3:14:10"}
+ {"current_steps": 590, "total_steps": 720, "loss": 0.8937, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.135668656967434e-06, "epoch": 7.375, "percentage": 81.94, "elapsed_time": "13:37:47", "remaining_time": "3:00:11"}
+ {"current_steps": 600, "total_steps": 720, "loss": 0.8835, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 3.5387801599533475e-06, "epoch": 7.5, "percentage": 83.33, "elapsed_time": "13:51:09", "remaining_time": "2:46:13"}
+ {"current_steps": 600, "total_steps": 720, "loss": null, "eval_loss": 0.899681031703949, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 7.5, "percentage": 83.33, "elapsed_time": "13:51:09", "remaining_time": "2:46:13"}
+ {"current_steps": 610, "total_steps": 720, "loss": 0.8841, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.98511170358155e-06, "epoch": 7.625, "percentage": 84.72, "elapsed_time": "14:10:24", "remaining_time": "2:33:21"}
+ {"current_steps": 620, "total_steps": 720, "loss": 0.8979, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.475778302439524e-06, "epoch": 7.75, "percentage": 86.11, "elapsed_time": "14:23:46", "remaining_time": "2:19:19"}
+ {"current_steps": 630, "total_steps": 720, "loss": 0.8696, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.0118056862137357e-06, "epoch": 7.875, "percentage": 87.5, "elapsed_time": "14:37:08", "remaining_time": "2:05:18"}
+ {"current_steps": 640, "total_steps": 720, "loss": 0.8782, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.59412823400657e-06, "epoch": 8.0, "percentage": 88.89, "elapsed_time": "14:50:28", "remaining_time": "1:51:18"}
+ {"current_steps": 650, "total_steps": 720, "loss": 0.8873, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.2235870926211619e-06, "epoch": 8.125, "percentage": 90.28, "elapsed_time": "15:03:50", "remaining_time": "1:37:20"}
+ {"current_steps": 660, "total_steps": 720, "loss": 0.8737, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 9.009284826036691e-07, "epoch": 8.25, "percentage": 91.67, "elapsed_time": "15:17:11", "remaining_time": "1:23:22"}
+ {"current_steps": 670, "total_steps": 720, "loss": 0.8883, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 6.268021954544096e-07, "epoch": 8.375, "percentage": 93.06, "elapsed_time": "15:30:33", "remaining_time": "1:09:26"}
+ {"current_steps": 680, "total_steps": 720, "loss": 0.87, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 4.0176028503425835e-07, "epoch": 8.5, "percentage": 94.44, "elapsed_time": "15:43:54", "remaining_time": "0:55:31"}
+ {"current_steps": 690, "total_steps": 720, "loss": 0.8746, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.262559558016325e-07, "epoch": 8.625, "percentage": 95.83, "elapsed_time": "15:57:16", "remaining_time": "0:41:37"}
+ {"current_steps": 700, "total_steps": 720, "loss": 0.8912, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 1.006426501190233e-07, "epoch": 8.75, "percentage": 97.22, "elapsed_time": "16:10:38", "remaining_time": "0:27:43"}
+ {"current_steps": 700, "total_steps": 720, "loss": null, "eval_loss": 0.8991448283195496, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 8.75, "percentage": 97.22, "elapsed_time": "16:10:38", "remaining_time": "0:27:43"}
+ {"current_steps": 710, "total_steps": 720, "loss": 0.9007, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 2.5173336467135267e-08, "epoch": 8.875, "percentage": 98.61, "elapsed_time": "16:29:53", "remaining_time": "0:13:56"}
+ {"current_steps": 720, "total_steps": 720, "loss": 0.8796, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": 0.0, "epoch": 9.0, "percentage": 100.0, "elapsed_time": "16:43:13", "remaining_time": "0:00:00"}
+ {"current_steps": 720, "total_steps": 720, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 9.0, "percentage": 100.0, "elapsed_time": "16:43:13", "remaining_time": "0:00:00"}
+ {"current_steps": 214, "total_steps": 214, "loss": null, "eval_loss": 0.8991448283195496, "predict_loss": null, "reward": null, "accuracy": null, "learning_rate": null, "epoch": 9.0, "percentage": 100.0, "elapsed_time": "16:49:07", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,590 @@
+ {
+   "best_metric": 0.8991448283195496,
+   "best_model_checkpoint": "../../output/chatglm3-6b/LangGPT/checkpoint-700",
+   "epoch": 9.0,
+   "eval_steps": 100,
+   "global_step": 720,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.125,
+       "grad_norm": 0.40453147888183594,
+       "learning_rate": 2.5e-05,
+       "loss": 1.6827,
+       "step": 10
+     },
+     {
+       "epoch": 0.25,
+       "grad_norm": 0.5551838874816895,
+       "learning_rate": 5e-05,
+       "loss": 1.6309,
+       "step": 20
+     },
+     {
+       "epoch": 0.375,
+       "grad_norm": 0.7859359383583069,
+       "learning_rate": 4.997482666353287e-05,
+       "loss": 1.5415,
+       "step": 30
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 0.597720742225647,
+       "learning_rate": 4.989935734988098e-05,
+       "loss": 1.393,
+       "step": 40
+     },
+     {
+       "epoch": 0.625,
+       "grad_norm": 0.4020984172821045,
+       "learning_rate": 4.977374404419837e-05,
+       "loss": 1.2563,
+       "step": 50
+     },
+     {
+       "epoch": 0.75,
+       "grad_norm": 0.35916563868522644,
+       "learning_rate": 4.959823971496574e-05,
+       "loss": 1.1963,
+       "step": 60
+     },
+     {
+       "epoch": 0.875,
+       "grad_norm": 0.3013848066329956,
+       "learning_rate": 4.937319780454559e-05,
+       "loss": 1.1385,
+       "step": 70
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 0.23849129676818848,
+       "learning_rate": 4.909907151739633e-05,
+       "loss": 1.1085,
+       "step": 80
+     },
+     {
+       "epoch": 1.125,
+       "grad_norm": 0.22885890305042267,
+       "learning_rate": 4.877641290737884e-05,
+       "loss": 1.1025,
+       "step": 90
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 0.20683708786964417,
+       "learning_rate": 4.8405871765993433e-05,
+       "loss": 1.0558,
+       "step": 100
+     },
+     {
+       "epoch": 1.25,
+       "eval_loss": 1.0500283241271973,
+       "eval_runtime": 353.0769,
+       "eval_samples_per_second": 2.419,
+       "eval_steps_per_second": 0.606,
+       "step": 100
+     },
+     {
+       "epoch": 1.375,
+       "grad_norm": 0.20663395524024963,
+       "learning_rate": 4.7988194313786275e-05,
+       "loss": 1.0258,
+       "step": 110
+     },
+     {
+       "epoch": 1.5,
+       "grad_norm": 0.18335361778736115,
+       "learning_rate": 4.752422169756048e-05,
+       "loss": 1.0261,
+       "step": 120
+     },
+     {
+       "epoch": 1.625,
+       "grad_norm": 0.18184833228588104,
+       "learning_rate": 4.701488829641845e-05,
+       "loss": 0.9923,
+       "step": 130
+     },
+     {
+       "epoch": 1.75,
+       "grad_norm": 0.19089923799037933,
+       "learning_rate": 4.6461219840046654e-05,
+       "loss": 0.9835,
+       "step": 140
+     },
+     {
+       "epoch": 1.875,
+       "grad_norm": 0.17791251838207245,
+       "learning_rate": 4.586433134303257e-05,
+       "loss": 1.0039,
+       "step": 150
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 0.18376672267913818,
+       "learning_rate": 4.522542485937369e-05,
+       "loss": 0.9947,
+       "step": 160
+     },
+     {
+       "epoch": 2.125,
+       "grad_norm": 0.20052292943000793,
+       "learning_rate": 4.454578706170075e-05,
+       "loss": 0.9821,
+       "step": 170
+     },
+     {
+       "epoch": 2.25,
+       "grad_norm": 0.19209513068199158,
+       "learning_rate": 4.382678665009028e-05,
+       "loss": 0.9535,
+       "step": 180
+     },
+     {
+       "epoch": 2.375,
+       "grad_norm": 0.19733993709087372,
+       "learning_rate": 4.306987159568479e-05,
+       "loss": 0.9514,
+       "step": 190
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 0.18989509344100952,
+       "learning_rate": 4.227656622467162e-05,
+       "loss": 0.9566,
+       "step": 200
+     },
+     {
+       "epoch": 2.5,
+       "eval_loss": 0.9630343914031982,
+       "eval_runtime": 353.288,
+       "eval_samples_per_second": 2.417,
+       "eval_steps_per_second": 0.606,
+       "step": 200
+     },
+     {
+       "epoch": 2.625,
+       "grad_norm": 0.19188831746578217,
+       "learning_rate": 4.144846814849282e-05,
+       "loss": 0.9655,
+       "step": 210
+     },
+     {
+       "epoch": 2.75,
+       "grad_norm": 0.2034657597541809,
+       "learning_rate": 4.058724504646834e-05,
+       "loss": 0.9537,
+       "step": 220
+     },
+     {
+       "epoch": 2.875,
+       "grad_norm": 0.20900140702724457,
+       "learning_rate": 3.969463130731183e-05,
+       "loss": 0.951,
+       "step": 230
+     },
+     {
+       "epoch": 3.0,
+       "grad_norm": 0.231728196144104,
+       "learning_rate": 3.8772424536302564e-05,
+       "loss": 0.938,
+       "step": 240
+     },
+     {
+       "epoch": 3.125,
+       "grad_norm": 0.21837086975574493,
+       "learning_rate": 3.782248193514766e-05,
+       "loss": 0.955,
+       "step": 250
+     },
+     {
+       "epoch": 3.25,
+       "grad_norm": 0.2057914286851883,
+       "learning_rate": 3.6846716561824965e-05,
+       "loss": 0.9319,
+       "step": 260
+     },
+     {
+       "epoch": 3.375,
+       "grad_norm": 0.22230790555477142,
+       "learning_rate": 3.5847093477938956e-05,
+       "loss": 0.9385,
+       "step": 270
+     },
+     {
+       "epoch": 3.5,
+       "grad_norm": 0.24387766420841217,
+       "learning_rate": 3.4825625791348096e-05,
+       "loss": 0.911,
+       "step": 280
+     },
+     {
+       "epoch": 3.625,
+       "grad_norm": 0.2634485065937042,
+       "learning_rate": 3.378437060203357e-05,
+       "loss": 0.9366,
+       "step": 290
+     },
+     {
+       "epoch": 3.75,
+       "grad_norm": 0.22965680062770844,
+       "learning_rate": 3.272542485937369e-05,
+       "loss": 0.9082,
+       "step": 300
+     },
+     {
+       "epoch": 3.75,
+       "eval_loss": 0.928753137588501,
+       "eval_runtime": 353.3512,
+       "eval_samples_per_second": 2.417,
+       "eval_steps_per_second": 0.606,
+       "step": 300
+     },
+     {
+       "epoch": 3.875,
+       "grad_norm": 0.21778391301631927,
+       "learning_rate": 3.165092113916688e-05,
+       "loss": 0.9158,
+       "step": 310
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 0.24541890621185303,
+       "learning_rate": 3.056302334890786e-05,
+       "loss": 0.9027,
+       "step": 320
+     },
+     {
+       "epoch": 4.125,
+       "grad_norm": 0.25015348196029663,
+       "learning_rate": 2.9463922369965917e-05,
+       "loss": 0.9336,
+       "step": 330
+     },
+     {
+       "epoch": 4.25,
+       "grad_norm": 0.22015893459320068,
+       "learning_rate": 2.8355831645441388e-05,
+       "loss": 0.9161,
+       "step": 340
+     },
+     {
+       "epoch": 4.375,
+       "grad_norm": 0.2516670823097229,
+       "learning_rate": 2.724098272258584e-05,
+       "loss": 0.8966,
+       "step": 350
+     },
+     {
+       "epoch": 4.5,
+       "grad_norm": 0.2541712820529938,
+       "learning_rate": 2.6121620758762877e-05,
+       "loss": 0.8954,
+       "step": 360
+     },
+     {
+       "epoch": 4.625,
+       "grad_norm": 0.25608915090560913,
+       "learning_rate": 2.5e-05,
+       "loss": 0.8815,
+       "step": 370
+     },
+     {
+       "epoch": 4.75,
+       "grad_norm": 0.24169643223285675,
+       "learning_rate": 2.3878379241237136e-05,
+       "loss": 0.89,
+       "step": 380
+     },
+     {
+       "epoch": 4.875,
+       "grad_norm": 0.2623349130153656,
+       "learning_rate": 2.2759017277414166e-05,
+       "loss": 0.9196,
+       "step": 390
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 0.29517388343811035,
+       "learning_rate": 2.164416835455862e-05,
+       "loss": 0.8992,
+       "step": 400
+     },
+     {
+       "epoch": 5.0,
+       "eval_loss": 0.9107962846755981,
+       "eval_runtime": 353.3297,
+       "eval_samples_per_second": 2.417,
+       "eval_steps_per_second": 0.606,
+       "step": 400
+     },
+     {
+       "epoch": 5.125,
+       "grad_norm": 0.2589443027973175,
+       "learning_rate": 2.0536077630034086e-05,
+       "loss": 0.888,
+       "step": 410
+     },
+     {
+       "epoch": 5.25,
+       "grad_norm": 0.24191297590732574,
+       "learning_rate": 1.9436976651092144e-05,
+       "loss": 0.8901,
+       "step": 420
+     },
+     {
+       "epoch": 5.375,
+       "grad_norm": 0.27726104855537415,
+       "learning_rate": 1.8349078860833123e-05,
+       "loss": 0.9147,
+       "step": 430
+     },
+     {
+       "epoch": 5.5,
+       "grad_norm": 0.23908096551895142,
+       "learning_rate": 1.7274575140626318e-05,
+       "loss": 0.8925,
+       "step": 440
+     },
+     {
+       "epoch": 5.625,
+       "grad_norm": 0.30176234245300293,
+       "learning_rate": 1.621562939796643e-05,
+       "loss": 0.9012,
+       "step": 450
+     },
+     {
+       "epoch": 5.75,
+       "grad_norm": 0.23645330965518951,
+       "learning_rate": 1.5174374208651912e-05,
+       "loss": 0.8808,
+       "step": 460
+     },
+     {
+       "epoch": 5.875,
+       "grad_norm": 0.2720588147640228,
+       "learning_rate": 1.4152906522061048e-05,
+       "loss": 0.8816,
+       "step": 470
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 0.2631034553050995,
+       "learning_rate": 1.3153283438175034e-05,
+       "loss": 0.8941,
+       "step": 480
+     },
+     {
+       "epoch": 6.125,
+       "grad_norm": 0.2486189901828766,
+       "learning_rate": 1.217751806485235e-05,
+       "loss": 0.9048,
+       "step": 490
+     },
+     {
+       "epoch": 6.25,
+       "grad_norm": 0.2926970422267914,
+       "learning_rate": 1.122757546369744e-05,
+       "loss": 0.8874,
+       "step": 500
+     },
+     {
+       "epoch": 6.25,
+       "eval_loss": 0.9028034806251526,
+       "eval_runtime": 353.2345,
+       "eval_samples_per_second": 2.418,
+       "eval_steps_per_second": 0.606,
+       "step": 500
+     },
+     {
+       "epoch": 6.375,
+       "grad_norm": 0.25221139192581177,
+       "learning_rate": 1.0305368692688174e-05,
+       "loss": 0.8738,
+       "step": 510
+     },
+     {
+       "epoch": 6.5,
+       "grad_norm": 0.2523793578147888,
+       "learning_rate": 9.412754953531663e-06,
+       "loss": 0.8951,
+       "step": 520
+     },
+     {
+       "epoch": 6.625,
+       "grad_norm": 0.2493809163570404,
+       "learning_rate": 8.551531851507186e-06,
+       "loss": 0.8914,
+       "step": 530
+     },
+     {
+       "epoch": 6.75,
+       "grad_norm": 0.2688143253326416,
+       "learning_rate": 7.723433775328384e-06,
+       "loss": 0.8818,
+       "step": 540
+     },
+     {
+       "epoch": 6.875,
+       "grad_norm": 0.2695543169975281,
+       "learning_rate": 6.930128404315214e-06,
+       "loss": 0.8794,
+       "step": 550
+     },
+     {
+       "epoch": 7.0,
+       "grad_norm": 0.27596864104270935,
+       "learning_rate": 6.173213349909729e-06,
+       "loss": 0.8814,
+       "step": 560
+     },
+     {
+       "epoch": 7.125,
+       "grad_norm": 0.27881208062171936,
+       "learning_rate": 5.454212938299255e-06,
+       "loss": 0.8909,
+       "step": 570
+     },
+     {
+       "epoch": 7.25,
+       "grad_norm": 0.2895490825176239,
+       "learning_rate": 4.7745751406263165e-06,
+       "loss": 0.8737,
+       "step": 580
+     },
+     {
+       "epoch": 7.375,
+       "grad_norm": 0.25476014614105225,
+       "learning_rate": 4.135668656967434e-06,
+       "loss": 0.8937,
+       "step": 590
+     },
+     {
+       "epoch": 7.5,
+       "grad_norm": 0.2785739600658417,
+       "learning_rate": 3.5387801599533475e-06,
+       "loss": 0.8835,
+       "step": 600
+     },
+     {
+       "epoch": 7.5,
+       "eval_loss": 0.899681031703949,
+       "eval_runtime": 353.2175,
+       "eval_samples_per_second": 2.418,
+       "eval_steps_per_second": 0.606,
+       "step": 600
+     },
+     {
+       "epoch": 7.625,
+       "grad_norm": 0.27525651454925537,
+       "learning_rate": 2.98511170358155e-06,
+       "loss": 0.8841,
+       "step": 610
+     },
+     {
+       "epoch": 7.75,
+       "grad_norm": 0.25052082538604736,
+       "learning_rate": 2.475778302439524e-06,
+       "loss": 0.8979,
+       "step": 620
+     },
+     {
+       "epoch": 7.875,
+       "grad_norm": 0.2501230537891388,
+       "learning_rate": 2.0118056862137357e-06,
+       "loss": 0.8696,
+       "step": 630
+     },
+     {
+       "epoch": 8.0,
+       "grad_norm": 0.2521611452102661,
+       "learning_rate": 1.59412823400657e-06,
+       "loss": 0.8782,
+       "step": 640
+     },
+     {
+       "epoch": 8.125,
+       "grad_norm": 0.249056875705719,
+       "learning_rate": 1.2235870926211619e-06,
+       "loss": 0.8873,
+       "step": 650
+     },
+     {
+       "epoch": 8.25,
+       "grad_norm": 0.27458131313323975,
+       "learning_rate": 9.009284826036691e-07,
+       "loss": 0.8737,
+       "step": 660
+     },
+     {
+       "epoch": 8.375,
+       "grad_norm": 0.24417945742607117,
+       "learning_rate": 6.268021954544096e-07,
+       "loss": 0.8883,
+       "step": 670
+     },
+     {
+       "epoch": 8.5,
+       "grad_norm": 0.25331562757492065,
+       "learning_rate": 4.0176028503425835e-07,
+       "loss": 0.87,
+       "step": 680
+     },
+     {
+       "epoch": 8.625,
+       "grad_norm": 0.25556355714797974,
+       "learning_rate": 2.262559558016325e-07,
+       "loss": 0.8746,
+       "step": 690
+     },
+     {
+       "epoch": 8.75,
+       "grad_norm": 0.27511876821517944,
+       "learning_rate": 1.006426501190233e-07,
+       "loss": 0.8912,
+       "step": 700
+     },
+     {
+       "epoch": 8.75,
+       "eval_loss": 0.8991448283195496,
+       "eval_runtime": 353.267,
+       "eval_samples_per_second": 2.417,
+       "eval_steps_per_second": 0.606,
+       "step": 700
+     },
+     {
+       "epoch": 8.875,
+       "grad_norm": 0.2639774680137634,
+       "learning_rate": 2.5173336467135267e-08,
+       "loss": 0.9007,
+       "step": 710
+     },
+     {
+       "epoch": 9.0,
+       "grad_norm": 0.27557557821273804,
+       "learning_rate": 0.0,
+       "loss": 0.8796,
+       "step": 720
+     },
+     {
+       "epoch": 9.0,
+       "step": 720,
+       "total_flos": 2.5580424283828716e+18,
+       "train_loss": 0.9693152533637153,
+       "train_runtime": 60193.9387,
+       "train_samples_per_second": 1.148,
+       "train_steps_per_second": 0.012
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 720,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 9,
+   "save_steps": 100,
+   "total_flos": 2.5580424283828716e+18,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5badf6ea1b208056e83e64d2e96f6325b3d65de256294dc09063561959f7369
+ size 5176
training_eval_loss.png ADDED
training_loss.png ADDED