hosseinbv committed on
Commit 3ada004 · verified · 1 Parent(s): 391e28b

Uploading /ephemeral/hossein/output/newData-progressive-yoco-tiny-llama-CDL-18
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: transformers
+ license: other
+ base_model: TinyLlama/TinyLlama_v1.1
+ tags:
+ - llama-factory
+ - full
+ - generated_from_trainer
+ model-index:
+ - name: newData-progressive-yoco-tiny-llama-CDL-18
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # newData-progressive-yoco-tiny-llama-CDL-18
+
+ This model is a fine-tuned version of the local checkpoint `/ephemeral/hossein/output/newData-progressive-yoco-tiny-llama-CDL-19/checkpoint-50`, trained on the alpaca_reformatted, UltraInteract_sft_reformatted, reformatted_ultrachat_200k, reformatted_MathInstruct, and small_slim_pajama datasets.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 58
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 1856
+ - total_eval_batch_size: 8
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.005
+ - training_steps: 50
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - PyTorch 2.5.1+cu124
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
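The hyperparameter list above is complete enough to reconstruct the trainer configuration. As a point of reference, here is a minimal sketch of the equivalent `transformers.TrainingArguments`; the run itself was launched through LLaMA-Factory, whose config keys differ slightly, and `output_dir` is a placeholder:

```python
from transformers import TrainingArguments

# Sketch of the reported configuration (not the exact launch script).
# Effective batch size: 58 per device x 4 accumulation steps x 8 GPUs = 1856.
args = TrainingArguments(
    output_dir="newData-progressive-yoco-tiny-llama-CDL-18",  # placeholder
    per_device_train_batch_size=58,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=4,
    learning_rate=2e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.005,
    max_steps=50,
    seed=42,
    logging_steps=1,   # trainer_state.json below confirms logging every step
    save_steps=50,
    bf16=True,         # config.json reports torch_dtype: bfloat16
)
# Adam betas=(0.9, 0.999) and epsilon=1e-08 are the AdamW defaults, so no override is needed.
```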
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.05959475566150179,
+   "total_flos": 106890730143744.0,
+   "train_loss": 2.030773923397064,
+   "train_runtime": 1597.0722,
+   "train_samples_per_second": 58.106,
+   "train_steps_per_second": 0.031
+ }
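These summary numbers are internally consistent with the hyperparameters in the model card; a quick arithmetic check using only values from this file:

```python
# 50 optimizer steps at an effective batch size of 1856 samples per step:
samples = 50 * 1856              # 92,800 samples seen
runtime = 1597.0722              # train_runtime in seconds

print(samples / runtime)         # ~58.106 -> matches train_samples_per_second
print(50 / runtime)              # ~0.031  -> matches train_steps_per_second
```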
config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "TinyLlama/TinyLlama_v1.1", "architectures": ["ProgressiveYocoLlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 1, "crossDecoder_start_idx": 4, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 5632, "max_position_embeddings": 2048, "mlp_bias": false, "model_type": "progressive_yoco_llama", "num_attention_heads": 32, "num_hidden_layers": 22, "num_key_value_heads": 4, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 10000.0, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "transformers_version": "4.45.2", "use_cache": false, "vocab_size": 32000}
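The `model_type` (`progressive_yoco_llama`) and architecture (`ProgressiveYocoLlamaForCausalLM`) are custom and not part of stock Transformers 4.45, and `crossDecoder_start_idx: 4` presumably marks where the YOCO-style cross-decoder layers begin. Loading therefore requires the accompanying modeling code; a hedged sketch, assuming the repository ships that code so `trust_remote_code=True` can resolve the classes:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder: local path or Hub repo id of this checkpoint.
model_id = "path/to/newData-progressive-yoco-tiny-llama-CDL-18"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,      # assumption: custom modeling code is bundled
)

# Quick smoke test under the defaults in generation_config.json below.
inputs = tokenizer("The capital of France is", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```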
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 2048,
+   "pad_token_id": 0,
+   "transformers_version": "4.45.2"
+ }
runs/Nov29_09-39-22_creative-turing-2/events.out.tfevents.1732873360.creative-turing-2.2677938.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2264760cd5763388436a75b397ca474decb033ca3d6589a65ac38a2e8c178c85
+ size 16158
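This file (like `tokenizer.model` and `training_args.bin` below) is stored as a Git LFS pointer, so the commit records only the hash and size shown above. A sketch for materializing the actual payloads with `huggingface_hub`; the repo id here is a guess inferred from the commit author and model name, so substitute the real one:

```python
from huggingface_hub import snapshot_download

# Hypothetical repo id inferred from the commit metadata; replace as needed.
local_dir = snapshot_download("hosseinbv/newData-progressive-yoco-tiny-llama-CDL-18")
print(local_dir)  # LFS-backed files are downloaded here at full size
```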
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ content }}{% elif message['role'] == 'assistant' %}{{ content }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
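Note that the `chat_template` above carries no role markers or separators at all: it emits the optional system message followed by the raw concatenation of each turn's content. A small sketch to verify that behavior (the checkpoint path is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/newData-progressive-yoco-tiny-llama-CDL-18")

messages = [
    {"role": "system", "content": "You are terse. "},
    {"role": "user", "content": "Say hi. "},
    {"role": "assistant", "content": "Hi."},
]
# Expected output: "You are terse. Say hi. Hi." -- plain concatenation,
# so any turn separators must be baked into the message contents themselves.
print(tok.apply_chat_template(messages, tokenize=False))
```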
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.05959475566150179,
+   "total_flos": 106890730143744.0,
+   "train_loss": 2.030773923397064,
+   "train_runtime": 1597.0722,
+   "train_samples_per_second": 58.106,
+   "train_steps_per_second": 0.031
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,51 @@
+ {"current_steps": 1, "total_steps": 50, "loss": 2.1431, "lr": 2e-05, "epoch": 0.0011918951132300357, "percentage": 2.0, "elapsed_time": "0:00:35", "remaining_time": "0:28:40"}
+ {"current_steps": 2, "total_steps": 50, "loss": 2.138, "lr": 1.9979453927503366e-05, "epoch": 0.0023837902264600714, "percentage": 4.0, "elapsed_time": "0:01:06", "remaining_time": "0:26:35"}
+ {"current_steps": 3, "total_steps": 50, "loss": 2.0438, "lr": 1.991790013823246e-05, "epoch": 0.003575685339690107, "percentage": 6.0, "elapsed_time": "0:01:38", "remaining_time": "0:25:35"}
+ {"current_steps": 4, "total_steps": 50, "loss": 2.0838, "lr": 1.9815591569910654e-05, "epoch": 0.004767580452920143, "percentage": 8.0, "elapsed_time": "0:02:09", "remaining_time": "0:24:51"}
+ {"current_steps": 5, "total_steps": 50, "loss": 2.0696, "lr": 1.9672948630390296e-05, "epoch": 0.0059594755661501785, "percentage": 10.0, "elapsed_time": "0:02:41", "remaining_time": "0:24:12"}
+ {"current_steps": 6, "total_steps": 50, "loss": 2.0977, "lr": 1.949055747010669e-05, "epoch": 0.007151370679380214, "percentage": 12.0, "elapsed_time": "0:03:13", "remaining_time": "0:23:36"}
+ {"current_steps": 7, "total_steps": 50, "loss": 2.0518, "lr": 1.926916757346022e-05, "epoch": 0.00834326579261025, "percentage": 14.0, "elapsed_time": "0:03:44", "remaining_time": "0:23:01"}
+ {"current_steps": 8, "total_steps": 50, "loss": 2.0096, "lr": 1.900968867902419e-05, "epoch": 0.009535160905840286, "percentage": 16.0, "elapsed_time": "0:04:16", "remaining_time": "0:22:27"}
+ {"current_steps": 9, "total_steps": 50, "loss": 2.0271, "lr": 1.8713187041233896e-05, "epoch": 0.010727056019070322, "percentage": 18.0, "elapsed_time": "0:04:48", "remaining_time": "0:21:53"}
+ {"current_steps": 10, "total_steps": 50, "loss": 2.0058, "lr": 1.8380881048918406e-05, "epoch": 0.011918951132300357, "percentage": 20.0, "elapsed_time": "0:05:20", "remaining_time": "0:21:20"}
+ {"current_steps": 11, "total_steps": 50, "loss": 2.01, "lr": 1.8014136218679566e-05, "epoch": 0.013110846245530394, "percentage": 22.0, "elapsed_time": "0:05:51", "remaining_time": "0:20:47"}
+ {"current_steps": 12, "total_steps": 50, "loss": 2.0019, "lr": 1.7614459583691346e-05, "epoch": 0.014302741358760428, "percentage": 24.0, "elapsed_time": "0:06:23", "remaining_time": "0:20:14"}
+ {"current_steps": 13, "total_steps": 50, "loss": 1.9932, "lr": 1.7183493500977277e-05, "epoch": 0.015494636471990465, "percentage": 26.0, "elapsed_time": "0:06:55", "remaining_time": "0:19:41"}
+ {"current_steps": 14, "total_steps": 50, "loss": 1.9988, "lr": 1.672300890261317e-05, "epoch": 0.0166865315852205, "percentage": 28.0, "elapsed_time": "0:07:27", "remaining_time": "0:19:09"}
+ {"current_steps": 15, "total_steps": 50, "loss": 2.0101, "lr": 1.6234898018587336e-05, "epoch": 0.017878426698450536, "percentage": 30.0, "elapsed_time": "0:07:58", "remaining_time": "0:18:37"}
+ {"current_steps": 16, "total_steps": 50, "loss": 1.9911, "lr": 1.5721166601221697e-05, "epoch": 0.01907032181168057, "percentage": 32.0, "elapsed_time": "0:08:30", "remaining_time": "0:18:04"}
+ {"current_steps": 17, "total_steps": 50, "loss": 2.0116, "lr": 1.5183925683105254e-05, "epoch": 0.02026221692491061, "percentage": 34.0, "elapsed_time": "0:09:02", "remaining_time": "0:17:32"}
+ {"current_steps": 18, "total_steps": 50, "loss": 2.0073, "lr": 1.4625382902408356e-05, "epoch": 0.021454112038140644, "percentage": 36.0, "elapsed_time": "0:09:34", "remaining_time": "0:17:00"}
+ {"current_steps": 19, "total_steps": 50, "loss": 2.0208, "lr": 1.4047833431223938e-05, "epoch": 0.02264600715137068, "percentage": 38.0, "elapsed_time": "0:10:05", "remaining_time": "0:16:28"}
+ {"current_steps": 20, "total_steps": 50, "loss": 2.031, "lr": 1.3453650544213078e-05, "epoch": 0.023837902264600714, "percentage": 40.0, "elapsed_time": "0:10:37", "remaining_time": "0:15:56"}
+ {"current_steps": 21, "total_steps": 50, "loss": 1.9957, "lr": 1.2845275866310325e-05, "epoch": 0.025029797377830752, "percentage": 42.0, "elapsed_time": "0:11:09", "remaining_time": "0:15:24"}
+ {"current_steps": 22, "total_steps": 50, "loss": 2.0049, "lr": 1.2225209339563144e-05, "epoch": 0.026221692491060787, "percentage": 44.0, "elapsed_time": "0:11:41", "remaining_time": "0:14:52"}
+ {"current_steps": 23, "total_steps": 50, "loss": 2.02, "lr": 1.1595998950333794e-05, "epoch": 0.027413587604290822, "percentage": 46.0, "elapsed_time": "0:12:12", "remaining_time": "0:14:20"}
+ {"current_steps": 24, "total_steps": 50, "loss": 2.0122, "lr": 1.0960230259076819e-05, "epoch": 0.028605482717520857, "percentage": 48.0, "elapsed_time": "0:12:44", "remaining_time": "0:13:48"}
+ {"current_steps": 25, "total_steps": 50, "loss": 2.0194, "lr": 1.0320515775716556e-05, "epoch": 0.029797377830750895, "percentage": 50.0, "elapsed_time": "0:13:16", "remaining_time": "0:13:16"}
+ {"current_steps": 26, "total_steps": 50, "loss": 2.0244, "lr": 9.67948422428345e-06, "epoch": 0.03098927294398093, "percentage": 52.0, "elapsed_time": "0:13:48", "remaining_time": "0:12:44"}
+ {"current_steps": 27, "total_steps": 50, "loss": 2.0301, "lr": 9.039769740923183e-06, "epoch": 0.03218116805721097, "percentage": 54.0, "elapsed_time": "0:14:19", "remaining_time": "0:12:12"}
+ {"current_steps": 28, "total_steps": 50, "loss": 2.039, "lr": 8.404001049666211e-06, "epoch": 0.033373063170441, "percentage": 56.0, "elapsed_time": "0:14:51", "remaining_time": "0:11:40"}
+ {"current_steps": 29, "total_steps": 50, "loss": 2.0065, "lr": 7.774790660436857e-06, "epoch": 0.03456495828367104, "percentage": 58.0, "elapsed_time": "0:15:23", "remaining_time": "0:11:08"}
+ {"current_steps": 30, "total_steps": 50, "loss": 1.9824, "lr": 7.154724133689677e-06, "epoch": 0.03575685339690107, "percentage": 60.0, "elapsed_time": "0:15:55", "remaining_time": "0:10:36"}
+ {"current_steps": 31, "total_steps": 50, "loss": 2.0211, "lr": 6.546349455786926e-06, "epoch": 0.03694874851013111, "percentage": 62.0, "elapsed_time": "0:16:26", "remaining_time": "0:10:04"}
+ {"current_steps": 32, "total_steps": 50, "loss": 2.0364, "lr": 5.952166568776062e-06, "epoch": 0.03814064362336114, "percentage": 64.0, "elapsed_time": "0:16:58", "remaining_time": "0:09:32"}
+ {"current_steps": 33, "total_steps": 50, "loss": 2.0124, "lr": 5.37461709759165e-06, "epoch": 0.03933253873659118, "percentage": 66.0, "elapsed_time": "0:17:30", "remaining_time": "0:09:01"}
+ {"current_steps": 34, "total_steps": 50, "loss": 2.0512, "lr": 4.81607431689475e-06, "epoch": 0.04052443384982122, "percentage": 68.0, "elapsed_time": "0:18:02", "remaining_time": "0:08:29"}
+ {"current_steps": 35, "total_steps": 50, "loss": 2.0375, "lr": 4.278833398778306e-06, "epoch": 0.041716328963051254, "percentage": 70.0, "elapsed_time": "0:18:33", "remaining_time": "0:07:57"}
+ {"current_steps": 36, "total_steps": 50, "loss": 2.0665, "lr": 3.7651019814126656e-06, "epoch": 0.04290822407628129, "percentage": 72.0, "elapsed_time": "0:19:05", "remaining_time": "0:07:25"}
+ {"current_steps": 37, "total_steps": 50, "loss": 2.0289, "lr": 3.2769910973868314e-06, "epoch": 0.04410011918951132, "percentage": 74.0, "elapsed_time": "0:19:37", "remaining_time": "0:06:53"}
+ {"current_steps": 38, "total_steps": 50, "loss": 2.0451, "lr": 2.8165064990227255e-06, "epoch": 0.04529201430274136, "percentage": 76.0, "elapsed_time": "0:20:09", "remaining_time": "0:06:21"}
+ {"current_steps": 39, "total_steps": 50, "loss": 2.0445, "lr": 2.3855404163086558e-06, "epoch": 0.04648390941597139, "percentage": 78.0, "elapsed_time": "0:20:40", "remaining_time": "0:05:49"}
+ {"current_steps": 40, "total_steps": 50, "loss": 2.0017, "lr": 1.9858637813204352e-06, "epoch": 0.04767580452920143, "percentage": 80.0, "elapsed_time": "0:21:12", "remaining_time": "0:05:18"}
+ {"current_steps": 41, "total_steps": 50, "loss": 2.0239, "lr": 1.6191189510815942e-06, "epoch": 0.04886769964243146, "percentage": 82.0, "elapsed_time": "0:21:44", "remaining_time": "0:04:46"}
+ {"current_steps": 42, "total_steps": 50, "loss": 1.9878, "lr": 1.286812958766106e-06, "epoch": 0.050059594755661505, "percentage": 84.0, "elapsed_time": "0:22:15", "remaining_time": "0:04:14"}
+ {"current_steps": 43, "total_steps": 50, "loss": 2.0205, "lr": 9.903113209758098e-07, "epoch": 0.05125148986889154, "percentage": 86.0, "elapsed_time": "0:22:47", "remaining_time": "0:03:42"}
+ {"current_steps": 44, "total_steps": 50, "loss": 2.0377, "lr": 7.308324265397837e-07, "epoch": 0.052443384982121574, "percentage": 88.0, "elapsed_time": "0:23:19", "remaining_time": "0:03:10"}
+ {"current_steps": 45, "total_steps": 50, "loss": 2.034, "lr": 5.094425298933136e-07, "epoch": 0.05363528009535161, "percentage": 90.0, "elapsed_time": "0:23:51", "remaining_time": "0:02:39"}
+ {"current_steps": 46, "total_steps": 50, "loss": 2.0496, "lr": 3.2705136960970554e-07, "epoch": 0.054827175208581644, "percentage": 92.0, "elapsed_time": "0:24:22", "remaining_time": "0:02:07"}
+ {"current_steps": 47, "total_steps": 50, "loss": 2.0299, "lr": 1.844084300893456e-07, "epoch": 0.05601907032181168, "percentage": 94.0, "elapsed_time": "0:24:54", "remaining_time": "0:01:35"}
+ {"current_steps": 48, "total_steps": 50, "loss": 2.0317, "lr": 8.209986176753947e-08, "epoch": 0.057210965435041714, "percentage": 96.0, "elapsed_time": "0:25:26", "remaining_time": "0:01:03"}
+ {"current_steps": 49, "total_steps": 50, "loss": 2.0296, "lr": 2.054607249663665e-08, "epoch": 0.058402860548271755, "percentage": 98.0, "elapsed_time": "0:25:58", "remaining_time": "0:00:31"}
+ {"current_steps": 50, "total_steps": 50, "loss": 2.068, "lr": 0.0, "epoch": 0.05959475566150179, "percentage": 100.0, "elapsed_time": "0:26:29", "remaining_time": "0:00:00"}
+ {"current_steps": 50, "total_steps": 50, "epoch": 0.05959475566150179, "percentage": 100.0, "elapsed_time": "0:26:37", "remaining_time": "0:00:00"}
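Each line of `trainer_log.jsonl` is one JSON record per optimizer step (the final record is a run summary without a `loss` field), which makes it straightforward to reproduce a curve like `training_loss.png`. A minimal sketch, assuming matplotlib is available:

```python
import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:             # skip the trailing summary record
            steps.append(record["current_steps"])
            losses.append(record["loss"])

plt.plot(steps, losses)
plt.xlabel("optimizer step")
plt.ylabel("training loss")
plt.savefig("training_loss_reproduced.png")
```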
trainer_state.json ADDED
@@ -0,0 +1,392 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.05959475566150179,
+   "eval_steps": 50,
+   "global_step": 50,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0011918951132300357,
+       "grad_norm": 3.1771470700380333,
+       "learning_rate": 2e-05,
+       "loss": 2.1431,
+       "step": 1
+     },
+     {
+       "epoch": 0.0023837902264600714,
+       "grad_norm": 3.1161312627163005,
+       "learning_rate": 1.9979453927503366e-05,
+       "loss": 2.138,
+       "step": 2
+     },
+     {
+       "epoch": 0.003575685339690107,
+       "grad_norm": 1.264785017261071,
+       "learning_rate": 1.991790013823246e-05,
+       "loss": 2.0438,
+       "step": 3
+     },
+     {
+       "epoch": 0.004767580452920143,
+       "grad_norm": 3.409228751017024,
+       "learning_rate": 1.9815591569910654e-05,
+       "loss": 2.0838,
+       "step": 4
+     },
+     {
+       "epoch": 0.0059594755661501785,
+       "grad_norm": 2.7305256748776907,
+       "learning_rate": 1.9672948630390296e-05,
+       "loss": 2.0696,
+       "step": 5
+     },
+     {
+       "epoch": 0.007151370679380214,
+       "grad_norm": 1.8763103868114503,
+       "learning_rate": 1.949055747010669e-05,
+       "loss": 2.0977,
+       "step": 6
+     },
+     {
+       "epoch": 0.00834326579261025,
+       "grad_norm": 1.5468453869545318,
+       "learning_rate": 1.926916757346022e-05,
+       "loss": 2.0518,
+       "step": 7
+     },
+     {
+       "epoch": 0.009535160905840286,
+       "grad_norm": 1.0642293866767805,
+       "learning_rate": 1.900968867902419e-05,
+       "loss": 2.0096,
+       "step": 8
+     },
+     {
+       "epoch": 0.010727056019070322,
+       "grad_norm": 0.780500699075983,
+       "learning_rate": 1.8713187041233896e-05,
+       "loss": 2.0271,
+       "step": 9
+     },
+     {
+       "epoch": 0.011918951132300357,
+       "grad_norm": 0.764005329228154,
+       "learning_rate": 1.8380881048918406e-05,
+       "loss": 2.0058,
+       "step": 10
+     },
+     {
+       "epoch": 0.013110846245530394,
+       "grad_norm": 0.6241582333904159,
+       "learning_rate": 1.8014136218679566e-05,
+       "loss": 2.01,
+       "step": 11
+     },
+     {
+       "epoch": 0.014302741358760428,
+       "grad_norm": 0.5902053742685855,
+       "learning_rate": 1.7614459583691346e-05,
+       "loss": 2.0019,
+       "step": 12
+     },
+     {
+       "epoch": 0.015494636471990465,
+       "grad_norm": 0.6425310694189268,
+       "learning_rate": 1.7183493500977277e-05,
+       "loss": 1.9932,
+       "step": 13
+     },
+     {
+       "epoch": 0.0166865315852205,
+       "grad_norm": 0.5309446367223852,
+       "learning_rate": 1.672300890261317e-05,
+       "loss": 1.9988,
+       "step": 14
+     },
+     {
+       "epoch": 0.017878426698450536,
+       "grad_norm": 0.5138876585539982,
+       "learning_rate": 1.6234898018587336e-05,
+       "loss": 2.0101,
+       "step": 15
+     },
+     {
+       "epoch": 0.01907032181168057,
+       "grad_norm": 0.5392570440013659,
+       "learning_rate": 1.5721166601221697e-05,
+       "loss": 1.9911,
+       "step": 16
+     },
+     {
+       "epoch": 0.02026221692491061,
+       "grad_norm": 0.5029299842352577,
+       "learning_rate": 1.5183925683105254e-05,
+       "loss": 2.0116,
+       "step": 17
+     },
+     {
+       "epoch": 0.021454112038140644,
+       "grad_norm": 0.43594035780610685,
+       "learning_rate": 1.4625382902408356e-05,
+       "loss": 2.0073,
+       "step": 18
+     },
+     {
+       "epoch": 0.02264600715137068,
+       "grad_norm": 0.41696447792308583,
+       "learning_rate": 1.4047833431223938e-05,
+       "loss": 2.0208,
+       "step": 19
+     },
+     {
+       "epoch": 0.023837902264600714,
+       "grad_norm": 0.3892062305227489,
+       "learning_rate": 1.3453650544213078e-05,
+       "loss": 2.031,
+       "step": 20
+     },
+     {
+       "epoch": 0.025029797377830752,
+       "grad_norm": 0.3632073887411339,
+       "learning_rate": 1.2845275866310325e-05,
+       "loss": 1.9957,
+       "step": 21
+     },
+     {
+       "epoch": 0.026221692491060787,
+       "grad_norm": 0.33796099940642593,
+       "learning_rate": 1.2225209339563144e-05,
+       "loss": 2.0049,
+       "step": 22
+     },
+     {
+       "epoch": 0.027413587604290822,
+       "grad_norm": 0.32832752521945185,
+       "learning_rate": 1.1595998950333794e-05,
+       "loss": 2.02,
+       "step": 23
+     },
+     {
+       "epoch": 0.028605482717520857,
+       "grad_norm": 0.31678859599430514,
+       "learning_rate": 1.0960230259076819e-05,
+       "loss": 2.0122,
+       "step": 24
+     },
+     {
+       "epoch": 0.029797377830750895,
+       "grad_norm": 0.3054795643275271,
+       "learning_rate": 1.0320515775716556e-05,
+       "loss": 2.0194,
+       "step": 25
+     },
+     {
+       "epoch": 0.03098927294398093,
+       "grad_norm": 0.30476969314662167,
+       "learning_rate": 9.67948422428345e-06,
+       "loss": 2.0244,
+       "step": 26
+     },
+     {
+       "epoch": 0.03218116805721097,
+       "grad_norm": 0.3397762952406562,
+       "learning_rate": 9.039769740923183e-06,
+       "loss": 2.0301,
+       "step": 27
+     },
+     {
+       "epoch": 0.033373063170441,
+       "grad_norm": 0.28294935774392,
+       "learning_rate": 8.404001049666211e-06,
+       "loss": 2.039,
+       "step": 28
+     },
+     {
+       "epoch": 0.03456495828367104,
+       "grad_norm": 0.2677495861482238,
+       "learning_rate": 7.774790660436857e-06,
+       "loss": 2.0065,
+       "step": 29
+     },
+     {
+       "epoch": 0.03575685339690107,
+       "grad_norm": 0.2523404664659208,
+       "learning_rate": 7.154724133689677e-06,
+       "loss": 1.9824,
+       "step": 30
+     },
+     {
+       "epoch": 0.03694874851013111,
+       "grad_norm": 0.2677420506485163,
+       "learning_rate": 6.546349455786926e-06,
+       "loss": 2.0211,
+       "step": 31
+     },
+     {
+       "epoch": 0.03814064362336114,
+       "grad_norm": 0.25403279083921193,
+       "learning_rate": 5.952166568776062e-06,
+       "loss": 2.0364,
+       "step": 32
+     },
+     {
+       "epoch": 0.03933253873659118,
+       "grad_norm": 0.23827255140088083,
+       "learning_rate": 5.37461709759165e-06,
+       "loss": 2.0124,
+       "step": 33
+     },
+     {
+       "epoch": 0.04052443384982122,
+       "grad_norm": 0.21241476817661348,
+       "learning_rate": 4.81607431689475e-06,
+       "loss": 2.0512,
+       "step": 34
+     },
+     {
+       "epoch": 0.041716328963051254,
+       "grad_norm": 0.209552693603396,
+       "learning_rate": 4.278833398778306e-06,
+       "loss": 2.0375,
+       "step": 35
+     },
+     {
+       "epoch": 0.04290822407628129,
+       "grad_norm": 0.21360560735074388,
+       "learning_rate": 3.7651019814126656e-06,
+       "loss": 2.0665,
+       "step": 36
+     },
+     {
+       "epoch": 0.04410011918951132,
+       "grad_norm": 0.2182223081631352,
+       "learning_rate": 3.2769910973868314e-06,
+       "loss": 2.0289,
+       "step": 37
+     },
+     {
+       "epoch": 0.04529201430274136,
+       "grad_norm": 0.21248420749052188,
+       "learning_rate": 2.8165064990227255e-06,
+       "loss": 2.0451,
+       "step": 38
+     },
+     {
+       "epoch": 0.04648390941597139,
+       "grad_norm": 0.20688489883738986,
+       "learning_rate": 2.3855404163086558e-06,
+       "loss": 2.0445,
+       "step": 39
+     },
+     {
+       "epoch": 0.04767580452920143,
+       "grad_norm": 0.20278373082128726,
+       "learning_rate": 1.9858637813204352e-06,
+       "loss": 2.0017,
+       "step": 40
+     },
+     {
+       "epoch": 0.04886769964243146,
+       "grad_norm": 0.20097941188780546,
+       "learning_rate": 1.6191189510815942e-06,
+       "loss": 2.0239,
+       "step": 41
+     },
+     {
+       "epoch": 0.050059594755661505,
+       "grad_norm": 0.19492229623743026,
+       "learning_rate": 1.286812958766106e-06,
+       "loss": 1.9878,
+       "step": 42
+     },
+     {
+       "epoch": 0.05125148986889154,
+       "grad_norm": 0.19191563010483195,
+       "learning_rate": 9.903113209758098e-07,
+       "loss": 2.0205,
+       "step": 43
+     },
+     {
+       "epoch": 0.052443384982121574,
+       "grad_norm": 0.18895682704532601,
+       "learning_rate": 7.308324265397837e-07,
+       "loss": 2.0377,
+       "step": 44
+     },
+     {
+       "epoch": 0.05363528009535161,
+       "grad_norm": 0.2270168441556834,
+       "learning_rate": 5.094425298933136e-07,
+       "loss": 2.034,
+       "step": 45
+     },
+     {
+       "epoch": 0.054827175208581644,
+       "grad_norm": 0.1944028112406021,
+       "learning_rate": 3.2705136960970554e-07,
+       "loss": 2.0496,
+       "step": 46
+     },
+     {
+       "epoch": 0.05601907032181168,
+       "grad_norm": 0.18999114531888883,
+       "learning_rate": 1.844084300893456e-07,
+       "loss": 2.0299,
+       "step": 47
+     },
+     {
+       "epoch": 0.057210965435041714,
+       "grad_norm": 0.19083668329491307,
+       "learning_rate": 8.209986176753947e-08,
+       "loss": 2.0317,
+       "step": 48
+     },
+     {
+       "epoch": 0.058402860548271755,
+       "grad_norm": 0.19161290816553783,
+       "learning_rate": 2.054607249663665e-08,
+       "loss": 2.0296,
+       "step": 49
+     },
+     {
+       "epoch": 0.05959475566150179,
+       "grad_norm": 0.19193914325325095,
+       "learning_rate": 0.0,
+       "loss": 2.068,
+       "step": 50
+     },
+     {
+       "epoch": 0.05959475566150179,
+       "step": 50,
+       "total_flos": 106890730143744.0,
+       "train_loss": 2.030773923397064,
+       "train_runtime": 1597.0722,
+       "train_samples_per_second": 58.106,
+       "train_steps_per_second": 0.031
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 50,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 50,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 106890730143744.0,
+   "train_batch_size": 58,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13968be49a7e025e2cb7acb0ceca624837fd0d70af3f67bd9257fae94dc374ae
+ size 7224
training_loss.png ADDED