Incomple committed on
Commit e69f53d · verified · 1 parent: 98fc2a2

Training in progress, epoch 0

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
adapter_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "microsoft/Phi-4-mini-instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "gate_up_proj",
+     "qkv_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
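This config describes a rank-8 LoRA adapter (lora_alpha 16, no dropout) over the attention (qkv_proj, o_proj) and MLP (gate_up_proj, down_proj) projections of microsoft/Phi-4-mini-instruct. A minimal sketch of attaching the adapter with PEFT; ADAPTER_REPO is a placeholder for whatever Hub repository this commit belongs to, which the diff itself does not name:

    # Minimal sketch, assuming the adapter repo id is known (placeholder below).
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    ADAPTER_REPO = "your-namespace/phi4-mini-adapter"  # placeholder, not from this commit

    base = AutoModelForCausalLM.from_pretrained("microsoft/Phi-4-mini-instruct")
    tokenizer = AutoTokenizer.from_pretrained(ADAPTER_REPO)
    # Reads adapter_config.json and adapter_model.safetensors from the repo.
    model = PeftModel.from_pretrained(base, ADAPTER_REPO)
    model.eval()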
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0756b9807db55b87b717f807efd250d1c7cc702cb1baa5e0ac889821ec0fbfa
+ size 46171456
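The ~46 MB adapter weights live in Git LFS, so the repository only tracks this three-line pointer. A sketch of checking a downloaded copy against the pointer's size and sha256 oid (hashlib.file_digest requires Python 3.11+):

    # Sketch: verify a local adapter_model.safetensors against the LFS pointer.
    import hashlib
    import os

    path = "adapter_model.safetensors"
    assert os.path.getsize(path) == 46171456, "size mismatch with LFS pointer"
    with open(path, "rb") as f:
        oid = hashlib.file_digest(f, "sha256").hexdigest()
    assert oid == "e0756b9807db55b87b717f807efd250d1c7cc702cb1baa5e0ac889821ec0fbfa"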
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "<|/tool_call|>": 200026,
+   "<|/tool|>": 200024,
+   "<|assistant|>": 200019,
+   "<|end|>": 200020,
+   "<|im_end|>": 200029,
+   "<|system|>": 200022,
+   "<|tag|>": 200028,
+   "<|tool_call|>": 200025,
+   "<|tool_response|>": 200027,
+   "<|tool|>": 200023,
+   "<|user|>": 200021
+ }
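These are Phi-4's chat and tool-calling markers, mapped to ids in the 200019–200029 range. Assuming the tokenizer shipped here matches the upstream one, the ids can be confirmed directly:

    # Sketch: check that the added tokens resolve to the ids listed above.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")
    assert tok.convert_tokens_to_ids("<|user|>") == 200021
    assert tok.convert_tokens_to_ids("<|assistant|>") == 200019
    assert tok.convert_tokens_to_ids("<|im_end|>") == 200029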
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8799b7ce62bf40e724f80af87b9545f9d9043778f435040a6c08e834dc047b69
+ size 15524282
tokenizer_config.json ADDED
@@ -0,0 +1,122 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "199999": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200018": {
+       "content": "<|endofprompt|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200019": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200020": {
+       "content": "<|end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200021": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200022": {
+       "content": "<|system|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200023": {
+       "content": "<|tool|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200024": {
+       "content": "<|/tool|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200025": {
+       "content": "<|tool_call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200026": {
+       "content": "<|/tool_call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200027": {
+       "content": "<|tool_response|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200028": {
+       "content": "<|tag|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200029": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
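The chat_template renders each message as <|role|>content<|end|> (with an extra <|tool|>…<|/tool|> block for system messages that carry tool definitions) and appends <|assistant|> when a generation prompt is requested. A sketch of applying it:

    # Sketch: render a two-message conversation with the template above.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
    prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # Expected: "<|system|>You are a helpful assistant.<|end|><|user|>Hello!<|end|><|assistant|>"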
trainer_log.jsonl ADDED
@@ -0,0 +1,25 @@
+ {"current_steps": 67, "total_steps": 1337, "loss": 4.4864, "lr": 4.925373134328357e-07, "epoch": 0.05009345794392523, "percentage": 5.01, "elapsed_time": "0:01:02", "remaining_time": "0:19:42"}
+ {"current_steps": 134, "total_steps": 1337, "loss": 4.4815, "lr": 9.925373134328357e-07, "epoch": 0.10018691588785046, "percentage": 10.02, "elapsed_time": "0:02:06", "remaining_time": "0:18:56"}
+ {"current_steps": 201, "total_steps": 1337, "loss": 4.3614, "lr": 9.451371571072319e-07, "epoch": 0.1502803738317757, "percentage": 15.03, "elapsed_time": "0:03:11", "remaining_time": "0:17:59"}
+ {"current_steps": 250, "total_steps": 1337, "eval_loss": 4.165477752685547, "epoch": 0.18691588785046728, "percentage": 18.7, "elapsed_time": "0:04:16", "remaining_time": "0:18:34"}
+ {"current_steps": 268, "total_steps": 1337, "loss": 4.1954, "lr": 8.894430590191188e-07, "epoch": 0.20037383177570092, "percentage": 20.04, "elapsed_time": "0:04:33", "remaining_time": "0:18:09"}
+ {"current_steps": 335, "total_steps": 1337, "loss": 3.9922, "lr": 8.337489609310058e-07, "epoch": 0.2504672897196262, "percentage": 25.06, "elapsed_time": "0:05:37", "remaining_time": "0:16:48"}
+ {"current_steps": 402, "total_steps": 1337, "loss": 3.6946, "lr": 7.780548628428927e-07, "epoch": 0.3005607476635514, "percentage": 30.07, "elapsed_time": "0:06:40", "remaining_time": "0:15:32"}
+ {"current_steps": 469, "total_steps": 1337, "loss": 3.3914, "lr": 7.223607647547797e-07, "epoch": 0.3506542056074766, "percentage": 35.08, "elapsed_time": "0:07:45", "remaining_time": "0:14:20"}
+ {"current_steps": 500, "total_steps": 1337, "eval_loss": 3.1697182655334473, "epoch": 0.37383177570093457, "percentage": 37.4, "elapsed_time": "0:08:32", "remaining_time": "0:14:18"}
+ {"current_steps": 536, "total_steps": 1337, "loss": 3.154, "lr": 6.666666666666666e-07, "epoch": 0.40074766355140184, "percentage": 40.09, "elapsed_time": "0:09:06", "remaining_time": "0:13:37"}
+ {"current_steps": 603, "total_steps": 1337, "loss": 2.9973, "lr": 6.109725685785536e-07, "epoch": 0.4508411214953271, "percentage": 45.1, "elapsed_time": "0:10:09", "remaining_time": "0:12:22"}
+ {"current_steps": 670, "total_steps": 1337, "loss": 2.815, "lr": 5.552784704904405e-07, "epoch": 0.5009345794392523, "percentage": 50.11, "elapsed_time": "0:11:13", "remaining_time": "0:11:10"}
+ {"current_steps": 737, "total_steps": 1337, "loss": 2.684, "lr": 4.995843724023275e-07, "epoch": 0.5510280373831775, "percentage": 55.12, "elapsed_time": "0:12:16", "remaining_time": "0:09:59"}
+ {"current_steps": 750, "total_steps": 1337, "eval_loss": 2.6262145042419434, "epoch": 0.5607476635514018, "percentage": 56.1, "elapsed_time": "0:12:47", "remaining_time": "0:10:01"}
+ {"current_steps": 804, "total_steps": 1337, "loss": 2.6152, "lr": 4.438902743142144e-07, "epoch": 0.6011214953271028, "percentage": 60.13, "elapsed_time": "0:13:39", "remaining_time": "0:09:03"}
+ {"current_steps": 871, "total_steps": 1337, "loss": 2.5592, "lr": 3.881961762261014e-07, "epoch": 0.6512149532710281, "percentage": 65.15, "elapsed_time": "0:14:42", "remaining_time": "0:07:52"}
+ {"current_steps": 938, "total_steps": 1337, "loss": 2.5099, "lr": 3.3250207813798835e-07, "epoch": 0.7013084112149532, "percentage": 70.16, "elapsed_time": "0:15:46", "remaining_time": "0:06:42"}
+ {"current_steps": 1000, "total_steps": 1337, "eval_loss": 2.444204807281494, "epoch": 0.7476635514018691, "percentage": 74.79, "elapsed_time": "0:17:03", "remaining_time": "0:05:44"}
+ {"current_steps": 1005, "total_steps": 1337, "loss": 2.4359, "lr": 2.7680798004987534e-07, "epoch": 0.7514018691588785, "percentage": 75.17, "elapsed_time": "0:17:08", "remaining_time": "0:05:39"}
+ {"current_steps": 1072, "total_steps": 1337, "loss": 2.4193, "lr": 2.2111388196176226e-07, "epoch": 0.8014953271028037, "percentage": 80.18, "elapsed_time": "0:18:11", "remaining_time": "0:04:29"}
+ {"current_steps": 1139, "total_steps": 1337, "loss": 2.4261, "lr": 1.6541978387364923e-07, "epoch": 0.851588785046729, "percentage": 85.19, "elapsed_time": "0:19:14", "remaining_time": "0:03:20"}
+ {"current_steps": 1206, "total_steps": 1337, "loss": 2.3706, "lr": 1.0972568578553615e-07, "epoch": 0.9016822429906542, "percentage": 90.2, "elapsed_time": "0:20:17", "remaining_time": "0:02:12"}
+ {"current_steps": 1250, "total_steps": 1337, "eval_loss": 2.3879618644714355, "epoch": 0.9345794392523364, "percentage": 93.49, "elapsed_time": "0:21:17", "remaining_time": "0:01:28"}
+ {"current_steps": 1273, "total_steps": 1337, "loss": 2.3796, "lr": 5.403158769742311e-08, "epoch": 0.9517757009345794, "percentage": 95.21, "elapsed_time": "0:21:39", "remaining_time": "0:01:05"}
+ {"current_steps": 1337, "total_steps": 1337, "epoch": 0.9996261682242991, "percentage": 100.0, "elapsed_time": "0:22:40", "remaining_time": "0:00:00"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c73af070a116712a13665b03156402a22c7b40bec0a1cf04cb8caeee48758be
+ size 5688
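training_args.bin is the torch-pickled TrainingArguments object that the Hugging Face Trainer saves alongside checkpoints. A sketch of inspecting it; note that unpickling executes arbitrary code, so only load binaries you trust:

    # Sketch: inspect the saved TrainingArguments. weights_only=False is
    # needed on recent torch versions because this is a pickled object,
    # not a tensor file.
    import torch

    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)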
vocab.json ADDED
The diff for this file is too large to render. See raw diff