rua9902 committed on
Commit f61859f · verified
1 Parent(s): e991e86

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. README.md +64 -0
  2. checkpoint-10000/config.json +43 -0
  3. checkpoint-10000/generation_config.json +6 -0
  4. checkpoint-10000/model.safetensors +3 -0
  5. checkpoint-10000/optimizer.pt +3 -0
  6. checkpoint-10000/rng_state.pth +3 -0
  7. checkpoint-10000/scheduler.pt +3 -0
  8. checkpoint-10000/special_tokens_map.json +24 -0
  9. checkpoint-10000/tokenizer.json +0 -0
  10. checkpoint-10000/tokenizer.model +3 -0
  11. checkpoint-10000/tokenizer_config.json +43 -0
  12. checkpoint-10000/trainer_state.json +63 -0
  13. checkpoint-10000/training_args.bin +3 -0
  14. checkpoint-15000/config.json +43 -0
  15. checkpoint-15000/generation_config.json +6 -0
  16. checkpoint-15000/model.safetensors +3 -0
  17. checkpoint-15000/optimizer.pt +3 -0
  18. checkpoint-15000/rng_state.pth +3 -0
  19. checkpoint-15000/scheduler.pt +3 -0
  20. checkpoint-15000/special_tokens_map.json +24 -0
  21. checkpoint-15000/tokenizer.json +0 -0
  22. checkpoint-15000/tokenizer.model +3 -0
  23. checkpoint-15000/tokenizer_config.json +43 -0
  24. checkpoint-15000/trainer_state.json +78 -0
  25. checkpoint-15000/training_args.bin +3 -0
  26. checkpoint-20000/config.json +43 -0
  27. checkpoint-20000/generation_config.json +6 -0
  28. checkpoint-20000/model.safetensors +3 -0
  29. checkpoint-20000/optimizer.pt +3 -0
  30. checkpoint-20000/rng_state.pth +3 -0
  31. checkpoint-20000/scheduler.pt +3 -0
  32. checkpoint-20000/special_tokens_map.json +24 -0
  33. checkpoint-20000/tokenizer.json +0 -0
  34. checkpoint-20000/tokenizer.model +3 -0
  35. checkpoint-20000/tokenizer_config.json +43 -0
  36. checkpoint-20000/trainer_state.json +93 -0
  37. checkpoint-20000/training_args.bin +3 -0
  38. checkpoint-25000/config.json +43 -0
  39. checkpoint-25000/generation_config.json +6 -0
  40. checkpoint-25000/model.safetensors +3 -0
  41. checkpoint-25000/optimizer.pt +3 -0
  42. checkpoint-25000/rng_state.pth +3 -0
  43. checkpoint-25000/scheduler.pt +3 -0
  44. checkpoint-25000/special_tokens_map.json +24 -0
  45. checkpoint-25000/tokenizer.json +0 -0
  46. checkpoint-25000/tokenizer.model +3 -0
  47. checkpoint-25000/tokenizer_config.json +43 -0
  48. checkpoint-25000/trainer_state.json +108 -0
  49. checkpoint-25000/training_args.bin +3 -0
  50. checkpoint-29108/config.json +43 -0
README.md ADDED
@@ -0,0 +1,64 @@
+ ---
+ library_name: transformers
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: RC-inspire-LLM-ex1-b256
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # RC-inspire-LLM-ex1-b256
+
+ This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 3.1645
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0035355339059327372
+ - train_batch_size: 256
+ - eval_batch_size: 256
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 2048
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 1000
+ - num_epochs: 1
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step  | Validation Loss |
+ |:-------------:|:------:|:-----:|:---------------:|
+ | 4.0101        | 0.1718 | 5000  | 3.6379          |
+ | 3.5732        | 0.3435 | 10000 | 3.4689          |
+ | 3.4485        | 0.5153 | 15000 | 3.3665          |
+ | 3.349         | 0.6871 | 20000 | 3.2674          |
+ | 3.247         | 0.8588 | 25000 | 3.1645          |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.0.dev0
+ - Pytorch 2.5.1+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.19.1
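
The card lists everything needed to try the checkpoint, but note that the configs below declare `model_type: hgrn_bit`, a custom architecture that stock transformers does not ship. A minimal loading sketch, assuming the custom HGRNBit modeling code is resolvable (e.g. via `trust_remote_code=True` or an installed package) and that the repo id is `rua9902/RC-inspire-LLM-ex1-b256` (an assumption, not stated in this diff):

```python
# Sketch only: load the uploaded checkpoint for inference. Requires the
# custom hgrn_bit modeling code to be available; repo id is assumed.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "rua9902/RC-inspire-LLM-ex1-b256"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("Once upon a time", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```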
checkpoint-10000/config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "GLU_mode": "unshared",
+   "RC_backward": true,
+   "architectures": [
+     "HGRNBitForCausalLM"
+   ],
+   "attn_mode": "fused_reservoir",
+   "bos_token_id": 1,
+   "conv_size": 4,
+   "devided_num": 1,
+   "embedded_vec_num": 1,
+   "eos_token_id": 2,
+   "expand_ratio": 1,
+   "fg_mode": "unshared",
+   "fg_shared": "unshared",
+   "fuse_cross_entropy": true,
+   "hidden_act": "swish",
+   "hidden_ratio": 4,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "inputLayer_backward": true,
+   "intermediate_size": null,
+   "max_position_embeddings": 2048,
+   "model_type": "hgrn_bit",
+   "multiple_readout": false,
+   "num_heads": 1,
+   "num_hidden_layers": 24,
+   "reservoir_backward": true,
+   "reservoir_kernel": true,
+   "rms_norm_eps": 1e-06,
+   "share_conv_kernel": true,
+   "sparse": 0.8,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "train_use_cache": false,
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_lower_bound": true,
+   "use_r": true,
+   "use_short_conv": false,
+   "using_residual": false,
+   "vocab_size": 32000
+ }
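
The config declares a 24-layer `HGRNBitForCausalLM` with hidden size 1024, a 32k vocabulary, and reservoir-computing flags (`RC_backward`, `reservoir_kernel`, `sparse: 0.8`). A quick stdlib sketch for inspecting these fields, assuming the checkpoint folder has been downloaded locally:

```python
# Sketch only: read the architecture settings straight from config.json.
import json

with open("checkpoint-10000/config.json") as f:
    cfg = json.load(f)

print(cfg["architectures"])  # ['HGRNBitForCausalLM']
print(cfg["num_hidden_layers"], cfg["hidden_size"], cfg["vocab_size"])  # 24 1024 32000
```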
checkpoint-10000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.45.0.dev0"
+ }
checkpoint-10000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23e78d5cc26f1a6b5188cd9378790390fbc8a5a35070435d6721693bd81574c7
+ size 1597239512
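
These three lines are a Git LFS pointer, not the weights themselves; the actual ~1.6 GB safetensors blob (consistent with roughly 0.4B float32 parameters, given the config above) lives in LFS storage. A hedged sketch for fetching and opening it, with the repo id again assumed:

```python
# Sketch only: hf_hub_download resolves the LFS pointer to the real file.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

path = hf_hub_download(
    repo_id="rua9902/RC-inspire-LLM-ex1-b256",      # hypothetical repo id
    filename="checkpoint-10000/model.safetensors",
)
state_dict = load_file(path)
print(sum(t.numel() for t in state_dict.values()), "parameters")
```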
checkpoint-10000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cb791fb59b9e731db0e4e40e29be4773c86abdc4cffd84ceecc4868274ee437
+ size 2791616442
checkpoint-10000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81251299c0659180c7c7c03eaf1df9fcca7393c48331b204dd67be5b9661a2c7
+ size 14244
checkpoint-10000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efff0a5d6bf80b1cd3c3cc4caf0b1d1b0073967333ef1b084358dbb92db7bab6
+ size 1256
checkpoint-10000/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-10000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-10000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
checkpoint-10000/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
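
The tokenizer is a Llama-style SentencePiece tokenizer; `pad_token` reuses `</s>`, and the huge `model_max_length` is the transformers sentinel for 'no limit set'. A sketch for loading it from the checkpoint folder, assuming the same repo id (`subfolder` is a standard `from_pretrained` argument):

```python
# Sketch only: load the tokenizer saved alongside this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "rua9902/RC-inspire-LLM-ex1-b256",  # hypothetical repo id
    subfolder="checkpoint-10000",
)
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <s> </s> </s>
```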
checkpoint-10000/trainer_state.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.34353783854580433,
+   "eval_steps": 5000,
+   "global_step": 10000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.17176891927290217,
+       "grad_norm": 0.08905366063117981,
+       "learning_rate": 0.0033617902217971778,
+       "loss": 4.0101,
+       "step": 5000
+     },
+     {
+       "epoch": 0.17176891927290217,
+       "eval_loss": 3.637869119644165,
+       "eval_runtime": 50.7828,
+       "eval_samples_per_second": 1024.107,
+       "eval_steps_per_second": 4.017,
+       "step": 5000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "grad_norm": 0.16601510345935822,
+       "learning_rate": 0.0027140761611464783,
+       "loss": 3.5732,
+       "step": 10000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "eval_loss": 3.468949794769287,
+       "eval_runtime": 48.0534,
+       "eval_samples_per_second": 1082.275,
+       "eval_steps_per_second": 4.245,
+       "step": 10000
+     }
+   ],
+   "logging_steps": 5000,
+   "max_steps": 29108,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 5000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 5.0056233222144e+18,
+   "train_batch_size": 256,
+   "trial_name": null,
+   "trial_params": null
+ }
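
`trainer_state.json` carries the running log: entries with a `loss` key are training logs and entries with `eval_loss` are evaluations, and the step-5000/10000 values match the README table. A stdlib sketch for extracting the curves:

```python
# Sketch only: pull train/eval losses out of trainer_state.json.
import json

with open("checkpoint-10000/trainer_state.json") as f:
    state = json.load(f)

for rec in state["log_history"]:
    if "loss" in rec:       # training log entry
        print(f"step {rec['step']:>5}  train loss {rec['loss']:.4f}")
    elif "eval_loss" in rec:  # evaluation entry
        print(f"step {rec['step']:>5}  eval loss  {rec['eval_loss']:.4f}")
```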
checkpoint-10000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1850dc2c79c3f528a5f300fc0db6f77b6a5d6c40974f54a27b47a836321f15a
+ size 5304
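
`training_args.bin` is a pickled `transformers.TrainingArguments` object rather than tensors, so it round-trips through `torch.load`. A sketch, with the caveat that unpickling executes code and should only be done on trusted files:

```python
# Sketch only: recover the TrainingArguments used for this run.
import torch

# weights_only=False is needed on recent PyTorch because this file holds a
# pickled Python object, not raw tensors; only unpickle files you trust.
args = torch.load("checkpoint-10000/training_args.bin", weights_only=False)
print(args.learning_rate, args.gradient_accumulation_steps)
```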
checkpoint-15000/config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "GLU_mode": "unshared",
+   "RC_backward": true,
+   "architectures": [
+     "HGRNBitForCausalLM"
+   ],
+   "attn_mode": "fused_reservoir",
+   "bos_token_id": 1,
+   "conv_size": 4,
+   "devided_num": 1,
+   "embedded_vec_num": 1,
+   "eos_token_id": 2,
+   "expand_ratio": 1,
+   "fg_mode": "unshared",
+   "fg_shared": "unshared",
+   "fuse_cross_entropy": true,
+   "hidden_act": "swish",
+   "hidden_ratio": 4,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "inputLayer_backward": true,
+   "intermediate_size": null,
+   "max_position_embeddings": 2048,
+   "model_type": "hgrn_bit",
+   "multiple_readout": false,
+   "num_heads": 1,
+   "num_hidden_layers": 24,
+   "reservoir_backward": true,
+   "reservoir_kernel": true,
+   "rms_norm_eps": 1e-06,
+   "share_conv_kernel": true,
+   "sparse": 0.8,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "train_use_cache": false,
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_lower_bound": true,
+   "use_r": true,
+   "use_short_conv": false,
+   "using_residual": false,
+   "vocab_size": 32000
+ }
checkpoint-15000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.45.0.dev0"
+ }
checkpoint-15000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f549b96dbaccad2881b839013ed4a37f2c6087bc2b0332c76b018fcb2965e90
+ size 1597239512
checkpoint-15000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:beac9b590b819bf4c4cc029c12e8596bb958135a9a27960a6722928b89c381c9
+ size 2791616442
checkpoint-15000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c320026f18de12243cb360a01cb850b8785b8a9028a5a6bde02330279a42450
+ size 14244
checkpoint-15000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:244a13713ea1139416b8913546b61526f04d73d2dd2a3256ddb5df31a1c0eb71
+ size 1256
checkpoint-15000/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-15000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-15000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
checkpoint-15000/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
checkpoint-15000/trainer_state.json ADDED
@@ -0,0 +1,78 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.5153067578187065,
+   "eval_steps": 5000,
+   "global_step": 15000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.17176891927290217,
+       "grad_norm": 0.08905366063117981,
+       "learning_rate": 0.0033617902217971778,
+       "loss": 4.0101,
+       "step": 5000
+     },
+     {
+       "epoch": 0.17176891927290217,
+       "eval_loss": 3.637869119644165,
+       "eval_runtime": 50.7828,
+       "eval_samples_per_second": 1024.107,
+       "eval_steps_per_second": 4.017,
+       "step": 5000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "grad_norm": 0.16601510345935822,
+       "learning_rate": 0.0027140761611464783,
+       "loss": 3.5732,
+       "step": 10000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "eval_loss": 3.468949794769287,
+       "eval_runtime": 48.0534,
+       "eval_samples_per_second": 1082.275,
+       "eval_steps_per_second": 4.245,
+       "step": 10000
+     },
+     {
+       "epoch": 0.5153067578187065,
+       "grad_norm": 0.14218954741954803,
+       "learning_rate": 0.0017784362562822313,
+       "loss": 3.4485,
+       "step": 15000
+     },
+     {
+       "epoch": 0.5153067578187065,
+       "eval_loss": 3.3664753437042236,
+       "eval_runtime": 48.0196,
+       "eval_samples_per_second": 1083.037,
+       "eval_steps_per_second": 4.248,
+       "step": 15000
+     }
+   ],
+   "logging_steps": 5000,
+   "max_steps": 29108,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 5000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 7.5084349833216e+18,
+   "train_batch_size": 256,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-15000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1850dc2c79c3f528a5f300fc0db6f77b6a5d6c40974f54a27b47a836321f15a
+ size 5304
checkpoint-20000/config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "GLU_mode": "unshared",
+   "RC_backward": true,
+   "architectures": [
+     "HGRNBitForCausalLM"
+   ],
+   "attn_mode": "fused_reservoir",
+   "bos_token_id": 1,
+   "conv_size": 4,
+   "devided_num": 1,
+   "embedded_vec_num": 1,
+   "eos_token_id": 2,
+   "expand_ratio": 1,
+   "fg_mode": "unshared",
+   "fg_shared": "unshared",
+   "fuse_cross_entropy": true,
+   "hidden_act": "swish",
+   "hidden_ratio": 4,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "inputLayer_backward": true,
+   "intermediate_size": null,
+   "max_position_embeddings": 2048,
+   "model_type": "hgrn_bit",
+   "multiple_readout": false,
+   "num_heads": 1,
+   "num_hidden_layers": 24,
+   "reservoir_backward": true,
+   "reservoir_kernel": true,
+   "rms_norm_eps": 1e-06,
+   "share_conv_kernel": true,
+   "sparse": 0.8,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "train_use_cache": false,
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_lower_bound": true,
+   "use_r": true,
+   "use_short_conv": false,
+   "using_residual": false,
+   "vocab_size": 32000
+ }
checkpoint-20000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.45.0.dev0"
+ }
checkpoint-20000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39dcb8f95023732e1ab3434d74fcc2cdebbb123d54386c7079f99e3d9fc18a0d
+ size 1597239512
checkpoint-20000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f54534cb83a6eda52c6d71703e35114e42941016f3c38e3601de91f583fda7e
+ size 2791616442
checkpoint-20000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a122e66b2ab5c994fe151f34a5046128009bee762e6370fadfacc41f43f30b25
+ size 14244
checkpoint-20000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88a4e80fd709c60c7883a98478aa8f71ef7726c83490bf303fc13c96c24ffebe
+ size 1256
checkpoint-20000/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-20000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-20000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
checkpoint-20000/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
checkpoint-20000/trainer_state.json ADDED
@@ -0,0 +1,93 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.6870756770916087,
+   "eval_steps": 5000,
+   "global_step": 20000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.17176891927290217,
+       "grad_norm": 0.08905366063117981,
+       "learning_rate": 0.0033617902217971778,
+       "loss": 4.0101,
+       "step": 5000
+     },
+     {
+       "epoch": 0.17176891927290217,
+       "eval_loss": 3.637869119644165,
+       "eval_runtime": 50.7828,
+       "eval_samples_per_second": 1024.107,
+       "eval_steps_per_second": 4.017,
+       "step": 5000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "grad_norm": 0.16601510345935822,
+       "learning_rate": 0.0027140761611464783,
+       "loss": 3.5732,
+       "step": 10000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "eval_loss": 3.468949794769287,
+       "eval_runtime": 48.0534,
+       "eval_samples_per_second": 1082.275,
+       "eval_steps_per_second": 4.245,
+       "step": 10000
+     },
+     {
+       "epoch": 0.5153067578187065,
+       "grad_norm": 0.14218954741954803,
+       "learning_rate": 0.0017784362562822313,
+       "loss": 3.4485,
+       "step": 15000
+     },
+     {
+       "epoch": 0.5153067578187065,
+       "eval_loss": 3.3664753437042236,
+       "eval_runtime": 48.0196,
+       "eval_samples_per_second": 1083.037,
+       "eval_steps_per_second": 4.248,
+       "step": 15000
+     },
+     {
+       "epoch": 0.6870756770916087,
+       "grad_norm": 0.1738550066947937,
+       "learning_rate": 0.0008397182465799535,
+       "loss": 3.349,
+       "step": 20000
+     },
+     {
+       "epoch": 0.6870756770916087,
+       "eval_loss": 3.267368793487549,
+       "eval_runtime": 48.4112,
+       "eval_samples_per_second": 1074.275,
+       "eval_steps_per_second": 4.214,
+       "step": 20000
+     }
+   ],
+   "logging_steps": 5000,
+   "max_steps": 29108,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 5000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.00112466444288e+19,
+   "train_batch_size": 256,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-20000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1850dc2c79c3f528a5f300fc0db6f77b6a5d6c40974f54a27b47a836321f15a
+ size 5304
checkpoint-25000/config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "GLU_mode": "unshared",
+   "RC_backward": true,
+   "architectures": [
+     "HGRNBitForCausalLM"
+   ],
+   "attn_mode": "fused_reservoir",
+   "bos_token_id": 1,
+   "conv_size": 4,
+   "devided_num": 1,
+   "embedded_vec_num": 1,
+   "eos_token_id": 2,
+   "expand_ratio": 1,
+   "fg_mode": "unshared",
+   "fg_shared": "unshared",
+   "fuse_cross_entropy": true,
+   "hidden_act": "swish",
+   "hidden_ratio": 4,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "inputLayer_backward": true,
+   "intermediate_size": null,
+   "max_position_embeddings": 2048,
+   "model_type": "hgrn_bit",
+   "multiple_readout": false,
+   "num_heads": 1,
+   "num_hidden_layers": 24,
+   "reservoir_backward": true,
+   "reservoir_kernel": true,
+   "rms_norm_eps": 1e-06,
+   "share_conv_kernel": true,
+   "sparse": 0.8,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "train_use_cache": false,
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_lower_bound": true,
+   "use_r": true,
+   "use_short_conv": false,
+   "using_residual": false,
+   "vocab_size": 32000
+ }
checkpoint-25000/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.45.0.dev0"
+ }
checkpoint-25000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3a6dcecd38d317124466ba11002be40c9390f752e04bc980e528403c990e4f5
+ size 1597239512
checkpoint-25000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2d459ec63e6d7ad77f48e99072add1927679ea3c300c380d9dfd665068e1ed1
+ size 2791616442
checkpoint-25000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40e903193b9436b564ddd981aa997531d38001c0b4ac1060961dcb1b49ca731c
+ size 14244
checkpoint-25000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f5f2b5a867e23793f577d96fdf47e336899195593ab9a9b8cf3cde59db844b1
+ size 1256
checkpoint-25000/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-25000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-25000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
checkpoint-25000/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
checkpoint-25000/trainer_state.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.8588445963645108,
+   "eval_steps": 5000,
+   "global_step": 25000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.17176891927290217,
+       "grad_norm": 0.08905366063117981,
+       "learning_rate": 0.0033617902217971778,
+       "loss": 4.0101,
+       "step": 5000
+     },
+     {
+       "epoch": 0.17176891927290217,
+       "eval_loss": 3.637869119644165,
+       "eval_runtime": 50.7828,
+       "eval_samples_per_second": 1024.107,
+       "eval_steps_per_second": 4.017,
+       "step": 5000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "grad_norm": 0.16601510345935822,
+       "learning_rate": 0.0027140761611464783,
+       "loss": 3.5732,
+       "step": 10000
+     },
+     {
+       "epoch": 0.34353783854580433,
+       "eval_loss": 3.468949794769287,
+       "eval_runtime": 48.0534,
+       "eval_samples_per_second": 1082.275,
+       "eval_steps_per_second": 4.245,
+       "step": 10000
+     },
+     {
+       "epoch": 0.5153067578187065,
+       "grad_norm": 0.14218954741954803,
+       "learning_rate": 0.0017784362562822313,
+       "loss": 3.4485,
+       "step": 15000
+     },
+     {
+       "epoch": 0.5153067578187065,
+       "eval_loss": 3.3664753437042236,
+       "eval_runtime": 48.0196,
+       "eval_samples_per_second": 1083.037,
+       "eval_steps_per_second": 4.248,
+       "step": 15000
+     },
+     {
+       "epoch": 0.6870756770916087,
+       "grad_norm": 0.1738550066947937,
+       "learning_rate": 0.0008397182465799535,
+       "loss": 3.349,
+       "step": 20000
+     },
+     {
+       "epoch": 0.6870756770916087,
+       "eval_loss": 3.267368793487549,
+       "eval_runtime": 48.4112,
+       "eval_samples_per_second": 1074.275,
+       "eval_steps_per_second": 4.214,
+       "step": 20000
+     },
+     {
+       "epoch": 0.8588445963645108,
+       "grad_norm": 0.1809106469154358,
+       "learning_rate": 0.00018334773665531966,
+       "loss": 3.247,
+       "step": 25000
+     },
+     {
+       "epoch": 0.8588445963645108,
+       "eval_loss": 3.1645448207855225,
+       "eval_runtime": 47.7975,
+       "eval_samples_per_second": 1088.069,
+       "eval_steps_per_second": 4.268,
+       "step": 25000
+     }
+   ],
+   "logging_steps": 5000,
+   "max_steps": 29108,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 5000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.2514058305536e+19,
+   "train_batch_size": 256,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-25000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1850dc2c79c3f528a5f300fc0db6f77b6a5d6c40974f54a27b47a836321f15a
+ size 5304
checkpoint-29108/config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "GLU_mode": "unshared",
+   "RC_backward": true,
+   "architectures": [
+     "HGRNBitForCausalLM"
+   ],
+   "attn_mode": "fused_reservoir",
+   "bos_token_id": 1,
+   "conv_size": 4,
+   "devided_num": 1,
+   "embedded_vec_num": 1,
+   "eos_token_id": 2,
+   "expand_ratio": 1,
+   "fg_mode": "unshared",
+   "fg_shared": "unshared",
+   "fuse_cross_entropy": true,
+   "hidden_act": "swish",
+   "hidden_ratio": 4,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "inputLayer_backward": true,
+   "intermediate_size": null,
+   "max_position_embeddings": 2048,
+   "model_type": "hgrn_bit",
+   "multiple_readout": false,
+   "num_heads": 1,
+   "num_hidden_layers": 24,
+   "reservoir_backward": true,
+   "reservoir_kernel": true,
+   "rms_norm_eps": 1e-06,
+   "share_conv_kernel": true,
+   "sparse": 0.8,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "train_use_cache": false,
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_lower_bound": true,
+   "use_r": true,
+   "use_short_conv": false,
+   "using_residual": false,
+   "vocab_size": 32000
+ }
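
checkpoint-29108 matches `max_steps` from the trainer states, so it is the final step of the one-epoch run (the 50-file view truncates before its remaining files). For reference, a hedged sketch of `TrainingArguments` mirroring the model card's hyperparameters; it is a reconstruction, not the author's script, and model/dataset setup is omitted:

```python
# Sketch only: TrainingArguments reconstructed from the model card.
# output_dir is hypothetical; any saved step folder can seed a resumed run.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="RC-inspire-LLM-ex1-b256",
    per_device_train_batch_size=256,
    per_device_eval_batch_size=256,
    gradient_accumulation_steps=8,   # 256 x 8 = total batch 2048
    learning_rate=0.0035355339059327372,
    lr_scheduler_type="cosine",
    warmup_steps=1000,
    num_train_epochs=1,
    fp16=True,                       # "Native AMP" per the card; could be bf16
    eval_strategy="steps",
    eval_steps=5000,
    save_steps=5000,
    logging_steps=5000,
    seed=42,
)
# trainer = Trainer(model=model, args=args, train_dataset=..., eval_dataset=...)
# trainer.train(resume_from_checkpoint="checkpoint-25000")
```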