Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +60 -0
- added_tokens.json +24 -0
- all_results.json +9 -0
- config.json +35 -0
- generation_config.json +14 -0
- llamaboard_config.yaml +77 -0
- merges.txt +0 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +346 -0
- running_log.txt +369 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +209 -0
- train_results.json +9 -0
- trainer_log.jsonl +62 -0
- trainer_state.json +531 -0
- training_args.bin +3 -0
- training_args.yaml +39 -0
- training_loss.png +0 -0
- vocab.json +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,60 @@
---
library_name: transformers
license: other
base_model: Qwen/Qwen2.5-Coder-7B-Instruct
tags:
- llama-factory
- freeze
- generated_from_trainer
model-index:
- name: qwen_under8_nlx
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# qwen_under8_nlx

This model is a fine-tuned version of [Qwen/Qwen2.5-Coder-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) on the codes_nlx_under8 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 3
- gradient_accumulation_steps: 8
- total_train_batch_size: 384
- total_eval_batch_size: 24
- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: cosine
- num_epochs: 1.0

### Training results

### Framework versions

- Transformers 4.48.2
- Pytorch 2.5.1+cu124
- Datasets 3.2.0
- Tokenizers 0.21.0
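The auto-generated card omits a usage snippet. A minimal inference sketch for a checkpoint with this layout, assuming standard transformers usage; the repo id below is a placeholder for wherever this folder is hosted:

```python
# Minimal inference sketch. "user/qwen_under8_nlx" is a hypothetical repo id;
# point it at the actual location of these files. Requires transformers
# (the card lists 4.48.2) and, for device_map="auto", accelerate.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "user/qwen_under8_nlx"  # placeholder path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="bfloat16", device_map="auto"
)

messages = [{"role": "user", "content": "Write a function that checks if a number is prime."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```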
added_tokens.json ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
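These ids line up with the special-token ids used elsewhere in this commit: `<|im_end|>` (151645) is the `eos_token_id` in config.json, and `<|endoftext|>` (151643) is the `bos_token_id`/`pad_token_id`. A quick sanity check, assuming the files from this commit sit in the current directory:

```python
# Sanity check: added-token ids should match config.json's special ids.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # assumes this folder is the repo root
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645      # eos_token_id
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643   # bos/pad_token_id
```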
all_results.json ADDED
@@ -0,0 +1,9 @@
{
  "epoch": 0.991869918699187,
  "num_input_tokens_seen": 95944704,
  "total_flos": 4.0703306623541576e+18,
  "train_loss": 0.5785222493234228,
  "train_runtime": 9437.3939,
  "train_samples_per_second": 2.499,
  "train_steps_per_second": 0.006
}
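These numbers are internally consistent with the run details in running_log.txt (23,588 packed examples, 61 optimization steps), as a quick check shows:

```python
# Cross-check of all_results.json against running_log.txt.
runtime = 9437.3939          # train_runtime (s)
print(23_588 / runtime)      # ≈ 2.4994 -> matches train_samples_per_second = 2.499
print(61 / runtime)          # ≈ 0.0065 -> reported (rounded) as train_steps_per_second = 0.006
print(95_944_704 / runtime)  # ≈ 10166 tokens/s -> matches the ~10,1xx throughput in the log
```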
config.json ADDED
@@ -0,0 +1,35 @@
{
  "_name_or_path": "Qwen/Qwen2.5-Coder-7B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "factor": 1.0,
    "high_freq_factor": 4.0,
    "low_freq_factor": 1.0,
    "original_max_position_embeddings": 32768,
    "rope_type": "llama3"
  },
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": false,
  "use_sliding_window": false,
  "vocab_size": 152064
}
generation_config.json ADDED
@@ -0,0 +1,14 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.48.2"
}
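These sampling defaults (temperature 0.7, top_p 0.8, top_k 20, repetition penalty 1.1) are carried over from the base Qwen2.5-Coder-7B-Instruct release, as running_log.txt shows, and `model.generate()` applies them unless overridden per call. A minimal sketch of reading them back, again with a placeholder repo id:

```python
# Read the shipped generation defaults; "user/qwen_under8_nlx" is a placeholder.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("user/qwen_under8_nlx")
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)  # 0.7 0.8 20
# Per-call overrides take precedence, e.g. model.generate(..., temperature=0.2)
```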
llamaboard_config.yaml ADDED
@@ -0,0 +1,77 @@
top.booster: liger_kernel
top.checkpoint_path: null
top.finetuning_type: freeze
top.model_name: Qwen2.5-Coder-7B-Instruct
top.quantization_bit: none
top.quantization_method: bitsandbytes
top.rope_scaling: llama3
top.template: qwen
train.additional_target: ''
train.apollo_rank: 256
train.apollo_scale: 1
train.apollo_target: all
train.apollo_update_interval: 200
train.badam_mode: layer
train.badam_switch_interval: 50
train.badam_switch_mode: ascending
train.badam_update_ratio: 0.05
train.batch_size: 16
train.compute_type: bf16
train.create_new_adapter: false
train.cutoff_len: 4096
train.dataset:
- codes_nlx_under8
train.dataset_dir: data
train.ds_offload: false
train.ds_stage: none
train.extra_args: '{}'
train.freeze_extra_modules: ''
train.freeze_trainable_layers: 2
train.freeze_trainable_modules: all
train.galore_rank: 16
train.galore_scale: 2
train.galore_target: all
train.galore_update_interval: 200
train.gradient_accumulation_steps: 8
train.learning_rate: 5e-5
train.logging_steps: 1
train.lora_alpha: 16
train.lora_dropout: 0
train.lora_rank: 8
train.lora_target: ''
train.loraplus_lr_ratio: 0
train.lr_scheduler_type: cosine
train.mask_history: false
train.max_grad_norm: '1.0'
train.max_samples: '50000000'
train.neat_packing: true
train.neftune_alpha: 0
train.num_train_epochs: '1'
train.packing: true
train.ppo_score_norm: false
train.ppo_whiten_rewards: false
train.pref_beta: 0.1
train.pref_ftx: 0
train.pref_loss: sigmoid
train.report_to:
- none
train.resize_vocab: false
train.reward_model: null
train.save_steps: 1000
train.swanlab_api_key: ''
train.swanlab_mode: cloud
train.swanlab_project: llamafactory
train.swanlab_run_name: ''
train.swanlab_workspace: ''
train.train_on_prompt: false
train.training_stage: Supervised Fine-Tuning
train.use_apollo: true
train.use_badam: false
train.use_dora: false
train.use_galore: false
train.use_llama_pro: true
train.use_pissa: false
train.use_rslora: false
train.use_swanlab: false
train.val_size: 0
train.warmup_steps: 0
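This file is the LLaMA Board (LLaMA-Factory web UI) state for the run: frozen-layer fine-tuning with the APOLLO optimizer and liger kernels, per-device batch size 16 with 8 gradient-accumulation steps. Combined with the 3 devices listed in the model card, that is where README.md's total_train_batch_size of 384 comes from:

```python
# Effective batch size implied by this config (num_devices comes from README.md).
per_device_batch = 16   # train.batch_size
grad_accum = 8          # train.gradient_accumulation_steps
num_devices = 3         # from the model card
print(per_device_batch * grad_accum * num_devices)  # 384
```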
merges.txt ADDED
The diff for this file is too large to render.
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0b6f069918b07c064cbba8ae4f00f529aa9bbf84b7cdfcb7fc2694a40f6aa8ef
size 4877660776

model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9bd2dfc1a309d8e54f229c8e53e7ad3716292c710b40069f5ecab0c556d03599
size 4932749648

model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7bc1b28714c15822444b8ecbfe3b28bf9e1da312eb4ab48a500c555fbea0f502
size 4991480296

model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e979e0d662c63653908e40664174cb6c89be0ee91500ef3887f1a7cec1b7fcc
size 1361612328
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
{
  "metadata": {
    "total_size": 16163464192
  },
  "weight_map": {
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
    "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
    "model.norm.weight": "model-00004-of-00004.safetensors"
  }
}
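The index maps every tensor to one of the four shards; loaders resolve each tensor's shard through `weight_map`. A small sketch of querying it directly, assuming the file sits in the current directory:

```python
# Look up which shard holds a tensor, and the total tensor payload.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["weight_map"]["lm_head.weight"])    # model-00004-of-00004.safetensors
print(index["metadata"]["total_size"] / 2**30)  # ≈ 15.05 GiB across the 4 shards
```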
running_log.txt ADDED
@@ -0,0 +1,369 @@
[INFO|2025-07-07 19:00:02] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/config.json

[INFO|2025-07-07 19:00:02] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-Coder-7B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 152064
}

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file vocab.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/vocab.json

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file merges.txt from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/merges.txt

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/tokenizer.json

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/tokenizer_config.json

[INFO|2025-07-07 19:00:02] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None

[INFO|2025-07-07 19:00:03] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.

[INFO|2025-07-07 19:00:03] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/config.json

[INFO|2025-07-07 19:00:03] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-Coder-7B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 152064
}

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file vocab.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/vocab.json

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file merges.txt from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/merges.txt

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file tokenizer.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/tokenizer.json

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file added_tokens.json from cache at None

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file special_tokens_map.json from cache at None

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file tokenizer_config.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/tokenizer_config.json

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2034 >> loading file chat_template.jinja from cache at None

[INFO|2025-07-07 19:00:04] tokenization_utils_base.py:2304 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.

[INFO|2025-07-07 19:00:04] logging.py:157 >> Add <|im_end|> to stop words.

[INFO|2025-07-07 19:00:04] logging.py:157 >> Loading dataset Codes3_query_filtered_553474_mark_less_than_8.0.json...

[INFO|2025-07-07 19:00:43] configuration_utils.py:696 >> loading configuration file config.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/config.json

[INFO|2025-07-07 19:00:43] configuration_utils.py:768 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-Coder-7B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.48.2",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 152064
}

[WARNING|2025-07-07 19:00:43] logging.py:162 >> Input length is smaller than max length. Consider increase input length.

[INFO|2025-07-07 19:00:43] logging.py:157 >> Using llama3 scaling strategy and setting scaling factor to 1.0.

[INFO|2025-07-07 19:00:43] logging.py:157 >> Using block diagonal attention for sequence packing without cross-attention.

[INFO|2025-07-07 19:00:44] logging.py:157 >> Liger kernel has been applied to the model.

[INFO|2025-07-07 19:00:44] modeling_utils.py:3904 >> loading weights file model.safetensors from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/model.safetensors.index.json

[INFO|2025-07-07 19:00:44] modeling_utils.py:1582 >> Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.

[INFO|2025-07-07 19:00:44] configuration_utils.py:1140 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "eos_token_id": 151645
}

[INFO|2025-07-07 19:00:52] modeling_utils.py:4888 >> All model checkpoint weights were used when initializing Qwen2ForCausalLM.

[INFO|2025-07-07 19:00:52] modeling_utils.py:4896 >> All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at Qwen/Qwen2.5-Coder-7B-Instruct.
If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.

[INFO|2025-07-07 19:00:52] configuration_utils.py:1095 >> loading configuration file generation_config.json from cache at /home/kiho/.cache/huggingface/hub/models--Qwen--Qwen2.5-Coder-7B-Instruct/snapshots/c03e6d358207e414f1eca0bb1891e29f1db0e242/generation_config.json

[INFO|2025-07-07 19:00:52] configuration_utils.py:1140 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8
}

[INFO|2025-07-07 19:00:52] logging.py:157 >> Gradient checkpointing enabled.

[INFO|2025-07-07 19:00:52] logging.py:157 >> Using torch SDPA for faster training and inference.

[INFO|2025-07-07 19:00:52] logging.py:157 >> Upcasting trainable params to float32.

[INFO|2025-07-07 19:00:52] logging.py:157 >> Fine-tuning method: Freeze

[INFO|2025-07-07 19:00:52] logging.py:157 >> Set trainable layers: .13.,.27.

[INFO|2025-07-07 19:00:52] logging.py:157 >> trainable params: 466,115,584 || all params: 7,615,616,512 || trainable%: 6.1205

[INFO|2025-07-07 19:00:52] trainer.py:741 >> Using auto half precision backend

[INFO|2025-07-07 19:00:53] logging.py:157 >> Found linear modules: q_proj,gate_proj,k_proj,down_proj,v_proj,o_proj,up_proj

[INFO|2025-07-07 19:00:53] logging.py:157 >> Using APOLLO optimizer with args: {'rank': 256, 'proj': 'random', 'proj_type': 'std', 'update_proj_gap': 200, 'scale': 1, 'scale_type': 'channel', 'scale_front': False}.

[INFO|2025-07-07 19:00:53] trainer.py:2369 >> ***** Running training *****

[INFO|2025-07-07 19:00:53] trainer.py:2370 >> Num examples = 23,588

[INFO|2025-07-07 19:00:53] trainer.py:2371 >> Num Epochs = 1

[INFO|2025-07-07 19:00:53] trainer.py:2372 >> Instantaneous batch size per device = 16

[INFO|2025-07-07 19:00:53] trainer.py:2375 >> Total train batch size (w. parallel, distributed & accumulation) = 384

[INFO|2025-07-07 19:00:53] trainer.py:2376 >> Gradient Accumulation steps = 8

[INFO|2025-07-07 19:00:53] trainer.py:2377 >> Total optimization steps = 61

[INFO|2025-07-07 19:00:53] trainer.py:2378 >> Number of trainable parameters = 466,115,584

[INFO|2025-07-07 19:03:35] logging.py:157 >> {'loss': 0.8835, 'learning_rate': 4.9967e-05, 'epoch': 0.02, 'throughput': 9741.84}

[INFO|2025-07-07 19:06:09] logging.py:157 >> {'loss': 0.8172, 'learning_rate': 4.9867e-05, 'epoch': 0.03, 'throughput': 9978.43}

[INFO|2025-07-07 19:08:43] logging.py:157 >> {'loss': 0.7415, 'learning_rate': 4.9702e-05, 'epoch': 0.05, 'throughput': 10069.50}

[INFO|2025-07-07 19:11:16] logging.py:157 >> {'loss': 0.7198, 'learning_rate': 4.9471e-05, 'epoch': 0.07, 'throughput': 10117.11}

[INFO|2025-07-07 19:13:50] logging.py:157 >> {'loss': 0.6985, 'learning_rate': 4.9176e-05, 'epoch': 0.08, 'throughput': 10129.94}

[INFO|2025-07-07 19:16:24] logging.py:157 >> {'loss': 0.6642, 'learning_rate': 4.8816e-05, 'epoch': 0.10, 'throughput': 10146.98}

[INFO|2025-07-07 19:18:58] logging.py:157 >> {'loss': 0.6677, 'learning_rate': 4.8393e-05, 'epoch': 0.11, 'throughput': 10159.48}

[INFO|2025-07-07 19:21:32] logging.py:157 >> {'loss': 0.6451, 'learning_rate': 4.7908e-05, 'epoch': 0.13, 'throughput': 10164.48}

[INFO|2025-07-07 19:24:06] logging.py:157 >> {'loss': 0.6327, 'learning_rate': 4.7362e-05, 'epoch': 0.15, 'throughput': 10172.04}

[INFO|2025-07-07 19:26:41] logging.py:157 >> {'loss': 0.6331, 'learning_rate': 4.6757e-05, 'epoch': 0.16, 'throughput': 10169.75}

[INFO|2025-07-07 19:29:17] logging.py:157 >> {'loss': 0.6219, 'learning_rate': 4.6094e-05, 'epoch': 0.18, 'throughput': 10161.43}

[INFO|2025-07-07 19:31:50] logging.py:157 >> {'loss': 0.6205, 'learning_rate': 4.5376e-05, 'epoch': 0.20, 'throughput': 10168.14}

[INFO|2025-07-07 19:34:24] logging.py:157 >> {'loss': 0.6010, 'learning_rate': 4.4603e-05, 'epoch': 0.21, 'throughput': 10172.37}

[INFO|2025-07-07 19:36:58] logging.py:157 >> {'loss': 0.6077, 'learning_rate': 4.3778e-05, 'epoch': 0.23, 'throughput': 10175.33}
|
| 238 |
+
|
| 239 |
+
[INFO|2025-07-07 19:39:32] logging.py:157 >> {'loss': 0.5958, 'learning_rate': 4.2904e-05, 'epoch': 0.24, 'throughput': 10179.71}
|
| 240 |
+
|
| 241 |
+
[INFO|2025-07-07 19:42:05] logging.py:157 >> {'loss': 0.5838, 'learning_rate': 4.1982e-05, 'epoch': 0.26, 'throughput': 10183.00}
|
| 242 |
+
|
| 243 |
+
[INFO|2025-07-07 19:44:39] logging.py:157 >> {'loss': 0.5629, 'learning_rate': 4.1015e-05, 'epoch': 0.28, 'throughput': 10185.76}
|
| 244 |
+
|
| 245 |
+
[INFO|2025-07-07 19:47:13] logging.py:157 >> {'loss': 0.5848, 'learning_rate': 4.0005e-05, 'epoch': 0.29, 'throughput': 10188.71}
|
| 246 |
+
|
| 247 |
+
[INFO|2025-07-07 19:49:47] logging.py:157 >> {'loss': 0.5772, 'learning_rate': 3.8956e-05, 'epoch': 0.31, 'throughput': 10190.23}
|
| 248 |
+
|
| 249 |
+
[INFO|2025-07-07 19:52:20] logging.py:157 >> {'loss': 0.5719, 'learning_rate': 3.7870e-05, 'epoch': 0.33, 'throughput': 10192.39}
|
| 250 |
+
|
| 251 |
+
[INFO|2025-07-07 19:54:54] logging.py:157 >> {'loss': 0.5445, 'learning_rate': 3.6749e-05, 'epoch': 0.34, 'throughput': 10195.72}
|
| 252 |
+
|
| 253 |
+
[INFO|2025-07-07 19:57:27] logging.py:157 >> {'loss': 0.5560, 'learning_rate': 3.5598e-05, 'epoch': 0.36, 'throughput': 10198.63}
|
| 254 |
+
|
| 255 |
+
[INFO|2025-07-07 20:00:01] logging.py:157 >> {'loss': 0.5736, 'learning_rate': 3.4418e-05, 'epoch': 0.37, 'throughput': 10199.96}
|
| 256 |
+
|
| 257 |
+
[INFO|2025-07-07 20:02:34] logging.py:157 >> {'loss': 0.5350, 'learning_rate': 3.3214e-05, 'epoch': 0.39, 'throughput': 10203.32}
|
| 258 |
+
|
| 259 |
+
[INFO|2025-07-07 20:05:07] logging.py:157 >> {'loss': 0.5634, 'learning_rate': 3.1987e-05, 'epoch': 0.41, 'throughput': 10205.81}
|
| 260 |
+
|
| 261 |
+
[INFO|2025-07-07 20:07:42] logging.py:157 >> {'loss': 0.5648, 'learning_rate': 3.0742e-05, 'epoch': 0.42, 'throughput': 10203.48}
|
| 262 |
+
|
| 263 |
+
[INFO|2025-07-07 20:10:16] logging.py:157 >> {'loss': 0.5467, 'learning_rate': 2.9482e-05, 'epoch': 0.44, 'throughput': 10202.72}
|
| 264 |
+
|
| 265 |
+
[INFO|2025-07-07 20:12:50] logging.py:157 >> {'loss': 0.5567, 'learning_rate': 2.8210e-05, 'epoch': 0.46, 'throughput': 10203.80}
|
| 266 |
+
|
| 267 |
+
[INFO|2025-07-07 20:15:24] logging.py:157 >> {'loss': 0.5847, 'learning_rate': 2.6929e-05, 'epoch': 0.47, 'throughput': 10203.36}
|
| 268 |
+
|
| 269 |
+
[INFO|2025-07-07 20:17:58] logging.py:157 >> {'loss': 0.5429, 'learning_rate': 2.5644e-05, 'epoch': 0.49, 'throughput': 10204.19}
|
| 270 |
+
|
| 271 |
+
[INFO|2025-07-07 20:20:32] logging.py:157 >> {'loss': 0.5435, 'learning_rate': 2.4356e-05, 'epoch': 0.50, 'throughput': 10204.68}
|
| 272 |
+
|
| 273 |
+
[INFO|2025-07-07 20:23:06] logging.py:157 >> {'loss': 0.5482, 'learning_rate': 2.3071e-05, 'epoch': 0.52, 'throughput': 10205.83}
|
| 274 |
+
|
| 275 |
+
[INFO|2025-07-07 20:25:40] logging.py:157 >> {'loss': 0.5418, 'learning_rate': 2.1790e-05, 'epoch': 0.54, 'throughput': 10206.16}
|
| 276 |
+
|
| 277 |
+
[INFO|2025-07-07 20:28:14] logging.py:157 >> {'loss': 0.5320, 'learning_rate': 2.0518e-05, 'epoch': 0.55, 'throughput': 10205.44}
|
| 278 |
+
|
| 279 |
+
[INFO|2025-07-07 20:30:48] logging.py:157 >> {'loss': 0.5360, 'learning_rate': 1.9258e-05, 'epoch': 0.57, 'throughput': 10205.42}
|
| 280 |
+
|
| 281 |
+
[INFO|2025-07-07 20:33:23] logging.py:157 >> {'loss': 0.5314, 'learning_rate': 1.8013e-05, 'epoch': 0.59, 'throughput': 10204.05}
|
| 282 |
+
|
| 283 |
+
[INFO|2025-07-07 20:35:56] logging.py:157 >> {'loss': 0.5595, 'learning_rate': 1.6786e-05, 'epoch': 0.60, 'throughput': 10205.77}
|
| 284 |
+
|
| 285 |
+
[INFO|2025-07-07 20:38:32] logging.py:157 >> {'loss': 0.5418, 'learning_rate': 1.5582e-05, 'epoch': 0.62, 'throughput': 10203.12}
|
| 286 |
+
|
| 287 |
+
[INFO|2025-07-07 20:41:06] logging.py:157 >> {'loss': 0.5438, 'learning_rate': 1.4402e-05, 'epoch': 0.63, 'throughput': 10203.74}
|
| 288 |
+
|
| 289 |
+
[INFO|2025-07-07 20:43:39] logging.py:157 >> {'loss': 0.5239, 'learning_rate': 1.3251e-05, 'epoch': 0.65, 'throughput': 10204.70}
|
| 290 |
+
|
| 291 |
+
[INFO|2025-07-07 20:46:13] logging.py:157 >> {'loss': 0.5459, 'learning_rate': 1.2130e-05, 'epoch': 0.67, 'throughput': 10204.78}
|
| 292 |
+
|
| 293 |
+
[INFO|2025-07-07 20:48:47] logging.py:157 >> {'loss': 0.5373, 'learning_rate': 1.1044e-05, 'epoch': 0.68, 'throughput': 10204.67}
|
| 294 |
+
|
| 295 |
+
[INFO|2025-07-07 20:51:22] logging.py:157 >> {'loss': 0.5474, 'learning_rate': 9.9946e-06, 'epoch': 0.70, 'throughput': 10203.24}
|
| 296 |
+
|
| 297 |
+
[INFO|2025-07-07 20:53:56] logging.py:157 >> {'loss': 0.5236, 'learning_rate': 8.9852e-06, 'epoch': 0.72, 'throughput': 10203.82}
|
| 298 |
+
|
| 299 |
+
[INFO|2025-07-07 20:56:29] logging.py:157 >> {'loss': 0.5336, 'learning_rate': 8.0182e-06, 'epoch': 0.73, 'throughput': 10205.18}
|
| 300 |
+
|
| 301 |
+
[INFO|2025-07-07 20:59:05] logging.py:157 >> {'loss': 0.5198, 'learning_rate': 7.0962e-06, 'epoch': 0.75, 'throughput': 10203.27}
|
| 302 |
+
|
| 303 |
+
[INFO|2025-07-07 21:01:40] logging.py:157 >> {'loss': 0.5419, 'learning_rate': 6.2217e-06, 'epoch': 0.76, 'throughput': 10202.64}
|
| 304 |
+
|
| 305 |
+
[INFO|2025-07-07 21:04:17] logging.py:157 >> {'loss': 0.5544, 'learning_rate': 5.3970e-06, 'epoch': 0.78, 'throughput': 10198.78}
|
| 306 |
+
|
| 307 |
+
[INFO|2025-07-07 21:06:50] logging.py:157 >> {'loss': 0.5689, 'learning_rate': 4.6243e-06, 'epoch': 0.80, 'throughput': 10200.12}
|
| 308 |
+
|
| 309 |
+
[INFO|2025-07-07 21:09:24] logging.py:157 >> {'loss': 0.5170, 'learning_rate': 3.9056e-06, 'epoch': 0.81, 'throughput': 10200.21}
|
| 310 |
+
|
| 311 |
+
[INFO|2025-07-07 21:11:58] logging.py:157 >> {'loss': 0.5440, 'learning_rate': 3.2429e-06, 'epoch': 0.83, 'throughput': 10200.78}
|
| 312 |
+
|
| 313 |
+
[INFO|2025-07-07 21:14:34] logging.py:157 >> {'loss': 0.5265, 'learning_rate': 2.6378e-06, 'epoch': 0.85, 'throughput': 10198.05}
|
| 314 |
+
|
| 315 |
+
[INFO|2025-07-07 21:17:09] logging.py:157 >> {'loss': 0.5301, 'learning_rate': 2.0921e-06, 'epoch': 0.86, 'throughput': 10197.60}
|
| 316 |
+
|
| 317 |
+
[INFO|2025-07-07 21:19:44] logging.py:157 >> {'loss': 0.5357, 'learning_rate': 1.6071e-06, 'epoch': 0.88, 'throughput': 10196.24}
|
| 318 |
+
|
| 319 |
+
[INFO|2025-07-07 21:22:18] logging.py:157 >> {'loss': 0.5220, 'learning_rate': 1.1841e-06, 'epoch': 0.89, 'throughput': 10196.15}
|
| 320 |
+
|
| 321 |
+
[INFO|2025-07-07 21:24:54] logging.py:157 >> {'loss': 0.5282, 'learning_rate': 8.2431e-07, 'epoch': 0.91, 'throughput': 10194.76}
|
| 322 |
+
|
| 323 |
+
[INFO|2025-07-07 21:27:28] logging.py:157 >> {'loss': 0.5327, 'learning_rate': 5.2861e-07, 'epoch': 0.93, 'throughput': 10195.16}
|
| 324 |
+
|
| 325 |
+
[INFO|2025-07-07 21:30:02] logging.py:157 >> {'loss': 0.5237, 'learning_rate': 2.9780e-07, 'epoch': 0.94, 'throughput': 10195.52}
|
| 326 |
+
|
| 327 |
+
[INFO|2025-07-07 21:32:36] logging.py:157 >> {'loss': 0.5499, 'learning_rate': 1.3250e-07, 'epoch': 0.96, 'throughput': 10195.79}
|
| 328 |
+
|
| 329 |
+
[INFO|2025-07-07 21:35:10] logging.py:157 >> {'loss': 0.5576, 'learning_rate': 3.3148e-08, 'epoch': 0.98, 'throughput': 10195.29}
|
| 330 |
+
|
| 331 |
+
[INFO|2025-07-07 21:37:45] logging.py:157 >> {'loss': 0.5487, 'learning_rate': 0.0000e+00, 'epoch': 0.99, 'throughput': 10195.34}
|
| 332 |
+
|
| 333 |
+
[INFO|2025-07-07 21:37:45] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/checkpoint-61
|
| 334 |
+
|
| 335 |
+
[INFO|2025-07-07 21:37:45] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/checkpoint-61/config.json
|
| 336 |
+
|
| 337 |
+
[INFO|2025-07-07 21:37:45] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/checkpoint-61/generation_config.json
|
| 338 |
+
|
| 339 |
+
[INFO|2025-07-07 21:38:10] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/checkpoint-61/model.safetensors.index.json.
|
| 340 |
+
|
| 341 |
+
[INFO|2025-07-07 21:38:10] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/checkpoint-61/tokenizer_config.json
|
| 342 |
+
|
| 343 |
+
[INFO|2025-07-07 21:38:10] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/checkpoint-61/special_tokens_map.json
|
| 344 |
+
|
| 345 |
+
[INFO|2025-07-07 21:38:10] trainer.py:2643 >>
|
| 346 |
+
|
| 347 |
+
Training completed. Do not forget to share your model on huggingface.co/models =)
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
[INFO|2025-07-07 21:38:10] trainer.py:3910 >> Saving model checkpoint to saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx
|
| 352 |
+
|
| 353 |
+
[INFO|2025-07-07 21:38:10] configuration_utils.py:420 >> Configuration saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/config.json
|
| 354 |
+
|
| 355 |
+
[INFO|2025-07-07 21:38:10] configuration_utils.py:909 >> Configuration saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/generation_config.json
|
| 356 |
+
|
| 357 |
+
[INFO|2025-07-07 21:38:35] modeling_utils.py:2996 >> The model is bigger than the maximum size per checkpoint (5GB) and is going to be split in 4 checkpoint shards. You can find where each parameters has been saved in the index located at saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/model.safetensors.index.json.
|
| 358 |
+
|
| 359 |
+
[INFO|2025-07-07 21:38:35] tokenization_utils_base.py:2491 >> tokenizer config file saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/tokenizer_config.json
|
| 360 |
+
|
| 361 |
+
[INFO|2025-07-07 21:38:35] tokenization_utils_base.py:2500 >> Special tokens file saved in saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx/special_tokens_map.json
|
| 362 |
+
|
| 363 |
+
[WARNING|2025-07-07 21:38:35] logging.py:162 >> No metric eval_loss to plot.
|
| 364 |
+
|
| 365 |
+
[WARNING|2025-07-07 21:38:35] logging.py:162 >> No metric eval_accuracy to plot.
|
| 366 |
+
|
| 367 |
+
[INFO|2025-07-07 21:38:35] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
|
| 368 |
+
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
|
| 369 |
+
|
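The log ends with the merged model being written to saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx. As a minimal sketch (not part of the upload; the path below is the local save directory from the log, so swap in the Hub repo id once published), the checkpoint can be loaded and queried with standard transformers calls, reusing the sampling values from the generation_config.json dumped above:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Local save path taken from the training log; replace with the Hub repo id if loading remotely.
path = "saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx"

tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.bfloat16, device_map="auto"  # bfloat16 matches the saved config
)

messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Sampling settings mirror the generation_config.json shown in the log above.
outputs = model.generate(
    inputs, max_new_tokens=256, do_sample=True,
    temperature=0.7, top_p=0.8, top_k=20, repetition_penalty=1.1,
)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```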
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
size 11421896
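tokenizer.json is stored as a Git LFS pointer: the repository keeps only these three lines recording the LFS spec version, the SHA-256 of the real payload, and its size in bytes (11,421,896). A small sketch, assuming a locally downloaded tokenizer.json, of verifying a file against such a pointer:

```python
import hashlib
import os

path = "tokenizer.json"  # assumed local download of the LFS object

# Values copied from the pointer file above.
expected_oid = "9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa"
expected_size = 11421896

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("tokenizer.json matches its LFS pointer")
```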
tokenizer_config.json
ADDED
@@ -0,0 +1,209 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 4096,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
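The chat_template above is the standard Qwen2.5 ChatML template: every turn is framed as <|im_start|>role ... <|im_end|>, a default system prompt is injected when none is supplied, and tool calls are serialized inside <tool_call> tags. A short illustration of the rendered prompt (any tokenizer carrying this template behaves the same way; the messages are placeholders):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-7B-Instruct")
messages = [
    {"role": "system", "content": "You are a concise coding assistant."},
    {"role": "user", "content": "Explain list comprehensions."},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# <|im_start|>system
# You are a concise coding assistant.<|im_end|>
# <|im_start|>user
# Explain list comprehensions.<|im_end|>
# <|im_start|>assistant
```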
train_results.json
ADDED
@@ -0,0 +1,9 @@
{
  "epoch": 0.991869918699187,
  "num_input_tokens_seen": 95944704,
  "total_flos": 4.0703306623541576e+18,
  "train_loss": 0.5785222493234228,
  "train_runtime": 9437.3939,
  "train_samples_per_second": 2.499,
  "train_steps_per_second": 0.006
}
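These aggregates are internally consistent with the step-level log that follows: 95,944,704 tokens over the 9,437.4 s run is roughly 10.2k tokens/s (the final jsonl record logs 10,167.7), and 23,588 training examples over the same runtime gives the reported 2.499 samples/s. A quick arithmetic check:

```python
# Cross-checking train_results.json against the training log above.
tokens = 95_944_704
runtime_s = 9_437.3939
examples = 23_588          # "Num examples" from the training log
steps = 61                 # total optimization steps

print(tokens / runtime_s)    # ~10166 tokens/s; the last jsonl entry logs 10167.7
print(examples / runtime_s)  # ~2.4994 -> train_samples_per_second = 2.499
print(steps / runtime_s)     # ~0.0065 -> train_steps_per_second = 0.006
```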
trainer_log.jsonl
ADDED
@@ -0,0 +1,62 @@
{"current_steps": 1, "total_steps": 61, "loss": 0.8835, "lr": 4.9966852247120764e-05, "epoch": 0.016260162601626018, "percentage": 1.64, "elapsed_time": "0:02:41", "remaining_time": "2:41:27", "throughput": 9741.84, "total_tokens": 1572864}
{"current_steps": 2, "total_steps": 61, "loss": 0.8172, "lr": 4.9867496890364726e-05, "epoch": 0.032520325203252036, "percentage": 3.28, "elapsed_time": "0:05:15", "remaining_time": "2:34:59", "throughput": 9978.43, "total_tokens": 3145728}
{"current_steps": 3, "total_steps": 61, "loss": 0.7415, "lr": 4.970219740227693e-05, "epoch": 0.04878048780487805, "percentage": 4.92, "elapsed_time": "0:07:48", "remaining_time": "2:30:59", "throughput": 10069.5, "total_tokens": 4718592}
{"current_steps": 4, "total_steps": 61, "loss": 0.7198, "lr": 4.947139212738395e-05, "epoch": 0.06504065040650407, "percentage": 6.56, "elapsed_time": "0:10:21", "remaining_time": "2:27:41", "throughput": 10117.11, "total_tokens": 6291456}
{"current_steps": 5, "total_steps": 61, "loss": 0.6985, "lr": 4.9175693119783013e-05, "epoch": 0.08130081300813008, "percentage": 8.2, "elapsed_time": "0:12:56", "remaining_time": "2:24:55", "throughput": 10129.94, "total_tokens": 7864320}
{"current_steps": 6, "total_steps": 61, "loss": 0.6642, "lr": 4.881588452008456e-05, "epoch": 0.0975609756097561, "percentage": 9.84, "elapsed_time": "0:15:30", "remaining_time": "2:22:05", "throughput": 10146.98, "total_tokens": 9437184}
{"current_steps": 7, "total_steps": 61, "loss": 0.6677, "lr": 4.839292047601234e-05, "epoch": 0.11382113821138211, "percentage": 11.48, "elapsed_time": "0:18:03", "remaining_time": "2:19:20", "throughput": 10159.48, "total_tokens": 11010048}
{"current_steps": 8, "total_steps": 61, "loss": 0.6451, "lr": 4.790792261217512e-05, "epoch": 0.13008130081300814, "percentage": 13.11, "elapsed_time": "0:20:37", "remaining_time": "2:16:41", "throughput": 10164.48, "total_tokens": 12582912}
{"current_steps": 9, "total_steps": 61, "loss": 0.6327, "lr": 4.736217705571989e-05, "epoch": 0.14634146341463414, "percentage": 14.75, "elapsed_time": "0:23:11", "remaining_time": "2:14:00", "throughput": 10172.04, "total_tokens": 14155776}
{"current_steps": 10, "total_steps": 61, "loss": 0.6331, "lr": 4.6757131025753886e-05, "epoch": 0.16260162601626016, "percentage": 16.39, "elapsed_time": "0:25:46", "remaining_time": "2:11:27", "throughput": 10169.75, "total_tokens": 15728640}
{"current_steps": 11, "total_steps": 61, "loss": 0.6219, "lr": 4.609438899557964e-05, "epoch": 0.17886178861788618, "percentage": 18.03, "elapsed_time": "0:28:22", "remaining_time": "2:08:59", "throughput": 10161.43, "total_tokens": 17301504}
{"current_steps": 12, "total_steps": 61, "loss": 0.6205, "lr": 4.5375708437920284e-05, "epoch": 0.1951219512195122, "percentage": 19.67, "elapsed_time": "0:30:56", "remaining_time": "2:06:19", "throughput": 10168.14, "total_tokens": 18874368}
{"current_steps": 13, "total_steps": 61, "loss": 0.601, "lr": 4.460299516441777e-05, "epoch": 0.21138211382113822, "percentage": 21.31, "elapsed_time": "0:33:30", "remaining_time": "2:03:41", "throughput": 10172.37, "total_tokens": 20447232}
{"current_steps": 14, "total_steps": 61, "loss": 0.6077, "lr": 4.3778298271762995e-05, "epoch": 0.22764227642276422, "percentage": 22.95, "elapsed_time": "0:36:04", "remaining_time": "2:01:05", "throughput": 10175.33, "total_tokens": 22020096}
{"current_steps": 15, "total_steps": 61, "loss": 0.5958, "lr": 4.2903804707859835e-05, "epoch": 0.24390243902439024, "percentage": 24.59, "elapsed_time": "0:38:37", "remaining_time": "1:58:27", "throughput": 10179.71, "total_tokens": 23592960}
{"current_steps": 16, "total_steps": 61, "loss": 0.5838, "lr": 4.198183347243233e-05, "epoch": 0.2601626016260163, "percentage": 26.23, "elapsed_time": "0:41:11", "remaining_time": "1:55:50", "throughput": 10183.0, "total_tokens": 25165824}
{"current_steps": 17, "total_steps": 61, "loss": 0.5629, "lr": 4.101482946745439e-05, "epoch": 0.2764227642276423, "percentage": 27.87, "elapsed_time": "0:43:45", "remaining_time": "1:53:14", "throughput": 10185.76, "total_tokens": 26738688}
{"current_steps": 18, "total_steps": 61, "loss": 0.5848, "lr": 4.000535701370921e-05, "epoch": 0.2926829268292683, "percentage": 29.51, "elapsed_time": "0:46:18", "remaining_time": "1:50:38", "throughput": 10188.71, "total_tokens": 28311552}
{"current_steps": 19, "total_steps": 61, "loss": 0.5772, "lr": 3.895609305067162e-05, "epoch": 0.3089430894308943, "percentage": 31.15, "elapsed_time": "0:48:52", "remaining_time": "1:48:02", "throughput": 10190.23, "total_tokens": 29884416}
{"current_steps": 20, "total_steps": 61, "loss": 0.5719, "lr": 3.7869820037745776e-05, "epoch": 0.3252032520325203, "percentage": 32.79, "elapsed_time": "0:51:26", "remaining_time": "1:45:27", "throughput": 10192.39, "total_tokens": 31457280}
{"current_steps": 21, "total_steps": 61, "loss": 0.5445, "lr": 3.6749418575683e-05, "epoch": 0.34146341463414637, "percentage": 34.43, "elapsed_time": "0:53:59", "remaining_time": "1:42:50", "throughput": 10195.72, "total_tokens": 33030144}
{"current_steps": 22, "total_steps": 61, "loss": 0.556, "lr": 3.5597859767746524e-05, "epoch": 0.35772357723577236, "percentage": 36.07, "elapsed_time": "0:56:32", "remaining_time": "1:40:14", "throughput": 10198.63, "total_tokens": 34603008}
{"current_steps": 23, "total_steps": 61, "loss": 0.5736, "lr": 3.4418197340879635e-05, "epoch": 0.37398373983739835, "percentage": 37.7, "elapsed_time": "0:59:06", "remaining_time": "1:37:39", "throughput": 10199.96, "total_tokens": 36175872}
{"current_steps": 24, "total_steps": 61, "loss": 0.535, "lr": 3.321355954777087e-05, "epoch": 0.3902439024390244, "percentage": 39.34, "elapsed_time": "1:01:39", "remaining_time": "1:35:03", "throughput": 10203.32, "total_tokens": 37748736}
{"current_steps": 25, "total_steps": 61, "loss": 0.5634, "lr": 3.1987140871290236e-05, "epoch": 0.4065040650406504, "percentage": 40.98, "elapsed_time": "1:04:12", "remaining_time": "1:32:28", "throughput": 10205.81, "total_tokens": 39321600}
{"current_steps": 26, "total_steps": 61, "loss": 0.5648, "lr": 3.07421935532949e-05, "epoch": 0.42276422764227645, "percentage": 42.62, "elapsed_time": "1:06:47", "remaining_time": "1:29:55", "throughput": 10203.48, "total_tokens": 40894464}
{"current_steps": 27, "total_steps": 61, "loss": 0.5467, "lr": 2.9482018970268393e-05, "epoch": 0.43902439024390244, "percentage": 44.26, "elapsed_time": "1:09:22", "remaining_time": "1:27:21", "throughput": 10202.72, "total_tokens": 42467328}
{"current_steps": 28, "total_steps": 61, "loss": 0.5567, "lr": 2.8209958878663778e-05, "epoch": 0.45528455284552843, "percentage": 45.9, "elapsed_time": "1:11:56", "remaining_time": "1:24:46", "throughput": 10203.8, "total_tokens": 44040192}
{"current_steps": 29, "total_steps": 61, "loss": 0.5847, "lr": 2.6929386553166164e-05, "epoch": 0.4715447154471545, "percentage": 47.54, "elapsed_time": "1:14:30", "remaining_time": "1:22:12", "throughput": 10203.36, "total_tokens": 45613056}
{"current_steps": 30, "total_steps": 61, "loss": 0.5429, "lr": 2.564369784137472e-05, "epoch": 0.4878048780487805, "percentage": 49.18, "elapsed_time": "1:17:04", "remaining_time": "1:19:38", "throughput": 10204.19, "total_tokens": 47185920}
{"current_steps": 31, "total_steps": 61, "loss": 0.5435, "lr": 2.4356302158625288e-05, "epoch": 0.5040650406504065, "percentage": 50.82, "elapsed_time": "1:19:38", "remaining_time": "1:17:03", "throughput": 10204.68, "total_tokens": 48758784}
{"current_steps": 32, "total_steps": 61, "loss": 0.5482, "lr": 2.3070613446833842e-05, "epoch": 0.5203252032520326, "percentage": 52.46, "elapsed_time": "1:22:11", "remaining_time": "1:14:29", "throughput": 10205.83, "total_tokens": 50331648}
{"current_steps": 33, "total_steps": 61, "loss": 0.5418, "lr": 2.1790041121336225e-05, "epoch": 0.5365853658536586, "percentage": 54.1, "elapsed_time": "1:24:45", "remaining_time": "1:11:55", "throughput": 10206.16, "total_tokens": 51904512}
{"current_steps": 34, "total_steps": 61, "loss": 0.532, "lr": 2.0517981029731616e-05, "epoch": 0.5528455284552846, "percentage": 55.74, "elapsed_time": "1:27:20", "remaining_time": "1:09:21", "throughput": 10205.44, "total_tokens": 53477376}
{"current_steps": 35, "total_steps": 61, "loss": 0.536, "lr": 1.9257806446705116e-05, "epoch": 0.5691056910569106, "percentage": 57.38, "elapsed_time": "1:29:54", "remaining_time": "1:06:47", "throughput": 10205.42, "total_tokens": 55050240}
{"current_steps": 36, "total_steps": 61, "loss": 0.5314, "lr": 1.8012859128709766e-05, "epoch": 0.5853658536585366, "percentage": 59.02, "elapsed_time": "1:32:29", "remaining_time": "1:04:13", "throughput": 10204.05, "total_tokens": 56623104}
{"current_steps": 37, "total_steps": 61, "loss": 0.5595, "lr": 1.6786440452229134e-05, "epoch": 0.6016260162601627, "percentage": 60.66, "elapsed_time": "1:35:02", "remaining_time": "1:01:38", "throughput": 10205.77, "total_tokens": 58195968}
{"current_steps": 38, "total_steps": 61, "loss": 0.5418, "lr": 1.558180265912037e-05, "epoch": 0.6178861788617886, "percentage": 62.3, "elapsed_time": "1:37:37", "remaining_time": "0:59:05", "throughput": 10203.12, "total_tokens": 59768832}
{"current_steps": 39, "total_steps": 61, "loss": 0.5438, "lr": 1.4402140232253486e-05, "epoch": 0.6341463414634146, "percentage": 63.93, "elapsed_time": "1:40:11", "remaining_time": "0:56:31", "throughput": 10203.74, "total_tokens": 61341696}
{"current_steps": 40, "total_steps": 61, "loss": 0.5239, "lr": 1.325058142431701e-05, "epoch": 0.6504065040650406, "percentage": 65.57, "elapsed_time": "1:42:45", "remaining_time": "0:53:56", "throughput": 10204.7, "total_tokens": 62914560}
{"current_steps": 41, "total_steps": 61, "loss": 0.5459, "lr": 1.213017996225424e-05, "epoch": 0.6666666666666666, "percentage": 67.21, "elapsed_time": "1:45:19", "remaining_time": "0:51:22", "throughput": 10204.78, "total_tokens": 64487424}
{"current_steps": 42, "total_steps": 61, "loss": 0.5373, "lr": 1.1043906949328387e-05, "epoch": 0.6829268292682927, "percentage": 68.85, "elapsed_time": "1:47:53", "remaining_time": "0:48:48", "throughput": 10204.67, "total_tokens": 66060288}
{"current_steps": 43, "total_steps": 61, "loss": 0.5474, "lr": 9.994642986290797e-06, "epoch": 0.6991869918699187, "percentage": 70.49, "elapsed_time": "1:50:28", "remaining_time": "0:46:14", "throughput": 10203.24, "total_tokens": 67633152}
{"current_steps": 44, "total_steps": 61, "loss": 0.5236, "lr": 8.985170532545622e-06, "epoch": 0.7154471544715447, "percentage": 72.13, "elapsed_time": "1:53:02", "remaining_time": "0:43:40", "throughput": 10203.82, "total_tokens": 69206016}
{"current_steps": 45, "total_steps": 61, "loss": 0.5336, "lr": 8.018166527567672e-06, "epoch": 0.7317073170731707, "percentage": 73.77, "elapsed_time": "1:55:35", "remaining_time": "0:41:05", "throughput": 10205.18, "total_tokens": 70778880}
{"current_steps": 46, "total_steps": 61, "loss": 0.5198, "lr": 7.096195292140173e-06, "epoch": 0.7479674796747967, "percentage": 75.41, "elapsed_time": "1:58:11", "remaining_time": "0:38:32", "throughput": 10203.27, "total_tokens": 72351744}
{"current_steps": 47, "total_steps": 61, "loss": 0.5419, "lr": 6.221701728237009e-06, "epoch": 0.7642276422764228, "percentage": 77.05, "elapsed_time": "2:00:45", "remaining_time": "0:35:58", "throughput": 10202.64, "total_tokens": 73924608}
{"current_steps": 48, "total_steps": 61, "loss": 0.5544, "lr": 5.397004835582242e-06, "epoch": 0.7804878048780488, "percentage": 78.69, "elapsed_time": "2:03:22", "remaining_time": "0:33:24", "throughput": 10198.78, "total_tokens": 75497472}
{"current_steps": 49, "total_steps": 61, "loss": 0.5689, "lr": 4.624291562079719e-06, "epoch": 0.7967479674796748, "percentage": 80.33, "elapsed_time": "2:05:55", "remaining_time": "0:30:50", "throughput": 10200.12, "total_tokens": 77070336}
{"current_steps": 50, "total_steps": 61, "loss": 0.517, "lr": 3.90561100442036e-06, "epoch": 0.8130081300813008, "percentage": 81.97, "elapsed_time": "2:08:29", "remaining_time": "0:28:16", "throughput": 10200.21, "total_tokens": 78643200}
{"current_steps": 51, "total_steps": 61, "loss": 0.544, "lr": 3.2428689742461188e-06, "epoch": 0.8292682926829268, "percentage": 83.61, "elapsed_time": "2:11:03", "remaining_time": "0:25:41", "throughput": 10200.78, "total_tokens": 80216064}
{"current_steps": 52, "total_steps": 61, "loss": 0.5265, "lr": 2.637822944280116e-06, "epoch": 0.8455284552845529, "percentage": 85.25, "elapsed_time": "2:13:40", "remaining_time": "0:23:08", "throughput": 10198.05, "total_tokens": 81788928}
{"current_steps": 53, "total_steps": 61, "loss": 0.5301, "lr": 2.092077387824884e-06, "epoch": 0.8617886178861789, "percentage": 86.89, "elapsed_time": "2:16:14", "remaining_time": "0:20:33", "throughput": 10197.6, "total_tokens": 83361792}
{"current_steps": 54, "total_steps": 61, "loss": 0.5357, "lr": 1.6070795239876618e-06, "epoch": 0.8780487804878049, "percentage": 88.52, "elapsed_time": "2:18:49", "remaining_time": "0:17:59", "throughput": 10196.24, "total_tokens": 84934656}
{"current_steps": 55, "total_steps": 61, "loss": 0.522, "lr": 1.1841154799154374e-06, "epoch": 0.8943089430894309, "percentage": 90.16, "elapsed_time": "2:21:24", "remaining_time": "0:15:25", "throughput": 10196.15, "total_tokens": 86507520}
{"current_steps": 56, "total_steps": 61, "loss": 0.5282, "lr": 8.243068802169906e-07, "epoch": 0.9105691056910569, "percentage": 91.8, "elapsed_time": "2:23:59", "remaining_time": "0:12:51", "throughput": 10194.76, "total_tokens": 88080384}
{"current_steps": 57, "total_steps": 61, "loss": 0.5327, "lr": 5.286078726160549e-07, "epoch": 0.926829268292683, "percentage": 93.44, "elapsed_time": "2:26:33", "remaining_time": "0:10:17", "throughput": 10195.16, "total_tokens": 89653248}
{"current_steps": 58, "total_steps": 61, "loss": 0.5237, "lr": 2.978025977230736e-07, "epoch": 0.943089430894309, "percentage": 95.08, "elapsed_time": "2:29:07", "remaining_time": "0:07:42", "throughput": 10195.52, "total_tokens": 91226112}
{"current_steps": 59, "total_steps": 61, "loss": 0.5499, "lr": 1.3250310963527358e-07, "epoch": 0.959349593495935, "percentage": 96.72, "elapsed_time": "2:31:41", "remaining_time": "0:05:08", "throughput": 10195.79, "total_tokens": 92798976}
{"current_steps": 60, "total_steps": 61, "loss": 0.5576, "lr": 3.314775287923677e-08, "epoch": 0.975609756097561, "percentage": 98.36, "elapsed_time": "2:34:16", "remaining_time": "0:02:34", "throughput": 10195.29, "total_tokens": 94371840}
{"current_steps": 61, "total_steps": 61, "loss": 0.5487, "lr": 0.0, "epoch": 0.991869918699187, "percentage": 100.0, "elapsed_time": "2:36:50", "remaining_time": "0:00:00", "throughput": 10195.34, "total_tokens": 95944704}
{"current_steps": 61, "total_steps": 61, "epoch": 0.991869918699187, "percentage": 100.0, "elapsed_time": "2:37:16", "remaining_time": "0:00:00", "throughput": 10167.7, "total_tokens": 95944704}
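Each line of trainer_log.jsonl is an independent JSON record, so a curve like training_loss.png can be re-plotted in a few lines (a sketch; matplotlib assumed available):

```python
import json
import matplotlib.pyplot as plt

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        if "loss" in rec:  # the final summary record carries no loss
            steps.append(rec["current_steps"])
            losses.append(rec["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.savefig("training_loss_reproduced.png")
```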
trainer_state.json
ADDED
@@ -0,0 +1,531 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.991869918699187,
  "eval_steps": 500,
  "global_step": 61,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016260162601626018,
      "grad_norm": 1.3551084995269775,
      "learning_rate": 4.9966852247120764e-05,
      "loss": 0.8835,
      "num_input_tokens_seen": 1572864,
      "step": 1
    },
    {
      "epoch": 0.032520325203252036,
      "grad_norm": 1.0672752857208252,
      "learning_rate": 4.9867496890364726e-05,
      "loss": 0.8172,
      "num_input_tokens_seen": 3145728,
      "step": 2
    },
    {
      "epoch": 0.04878048780487805,
      "grad_norm": 0.6499503254890442,
      "learning_rate": 4.970219740227693e-05,
      "loss": 0.7415,
      "num_input_tokens_seen": 4718592,
      "step": 3
    },
    {
      "epoch": 0.06504065040650407,
      "grad_norm": 0.4817531704902649,
      "learning_rate": 4.947139212738395e-05,
      "loss": 0.7198,
      "num_input_tokens_seen": 6291456,
      "step": 4
    },
    {
      "epoch": 0.08130081300813008,
      "grad_norm": 0.33635902404785156,
      "learning_rate": 4.9175693119783013e-05,
      "loss": 0.6985,
      "num_input_tokens_seen": 7864320,
      "step": 5
    },
    {
      "epoch": 0.0975609756097561,
      "grad_norm": 0.27813318371772766,
      "learning_rate": 4.881588452008456e-05,
      "loss": 0.6642,
      "num_input_tokens_seen": 9437184,
      "step": 6
    },
    {
      "epoch": 0.11382113821138211,
      "grad_norm": 0.25561287999153137,
      "learning_rate": 4.839292047601234e-05,
      "loss": 0.6677,
      "num_input_tokens_seen": 11010048,
      "step": 7
    },
    {
      "epoch": 0.13008130081300814,
      "grad_norm": 0.25520581007003784,
      "learning_rate": 4.790792261217512e-05,
      "loss": 0.6451,
      "num_input_tokens_seen": 12582912,
      "step": 8
    },
    {
      "epoch": 0.14634146341463414,
      "grad_norm": 0.2094946950674057,
      "learning_rate": 4.736217705571989e-05,
      "loss": 0.6327,
      "num_input_tokens_seen": 14155776,
      "step": 9
    },
    {
      "epoch": 0.16260162601626016,
      "grad_norm": 0.21939203143119812,
      "learning_rate": 4.6757131025753886e-05,
      "loss": 0.6331,
      "num_input_tokens_seen": 15728640,
      "step": 10
    },
    {
      "epoch": 0.17886178861788618,
      "grad_norm": 0.20424292981624603,
      "learning_rate": 4.609438899557964e-05,
      "loss": 0.6219,
      "num_input_tokens_seen": 17301504,
      "step": 11
    },
    {
      "epoch": 0.1951219512195122,
      "grad_norm": 0.19756929576396942,
      "learning_rate": 4.5375708437920284e-05,
      "loss": 0.6205,
      "num_input_tokens_seen": 18874368,
      "step": 12
    },
    {
      "epoch": 0.21138211382113822,
      "grad_norm": 0.18154746294021606,
      "learning_rate": 4.460299516441777e-05,
      "loss": 0.601,
      "num_input_tokens_seen": 20447232,
      "step": 13
    },
    {
      "epoch": 0.22764227642276422,
      "grad_norm": 0.1677803248167038,
      "learning_rate": 4.3778298271762995e-05,
      "loss": 0.6077,
      "num_input_tokens_seen": 22020096,
      "step": 14
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 0.15535381436347961,
      "learning_rate": 4.2903804707859835e-05,
      "loss": 0.5958,
      "num_input_tokens_seen": 23592960,
      "step": 15
    },
    {
      "epoch": 0.2601626016260163,
      "grad_norm": 0.16249153017997742,
      "learning_rate": 4.198183347243233e-05,
      "loss": 0.5838,
      "num_input_tokens_seen": 25165824,
      "step": 16
    },
    {
      "epoch": 0.2764227642276423,
      "grad_norm": 0.145711287856102,
      "learning_rate": 4.101482946745439e-05,
      "loss": 0.5629,
      "num_input_tokens_seen": 26738688,
      "step": 17
    },
    {
      "epoch": 0.2926829268292683,
      "grad_norm": 0.12892590463161469,
      "learning_rate": 4.000535701370921e-05,
      "loss": 0.5848,
      "num_input_tokens_seen": 28311552,
      "step": 18
    },
    {
      "epoch": 0.3089430894308943,
      "grad_norm": 0.12926466763019562,
      "learning_rate": 3.895609305067162e-05,
      "loss": 0.5772,
      "num_input_tokens_seen": 29884416,
      "step": 19
    },
    {
      "epoch": 0.3252032520325203,
      "grad_norm": 0.12316538393497467,
      "learning_rate": 3.7869820037745776e-05,
      "loss": 0.5719,
      "num_input_tokens_seen": 31457280,
      "step": 20
    },
    {
      "epoch": 0.34146341463414637,
      "grad_norm": 0.12052515894174576,
      "learning_rate": 3.6749418575683e-05,
      "loss": 0.5445,
      "num_input_tokens_seen": 33030144,
      "step": 21
    },
    {
      "epoch": 0.35772357723577236,
      "grad_norm": 0.12180831283330917,
      "learning_rate": 3.5597859767746524e-05,
      "loss": 0.556,
      "num_input_tokens_seen": 34603008,
      "step": 22
    },
    {
      "epoch": 0.37398373983739835,
      "grad_norm": 0.12532484531402588,
      "learning_rate": 3.4418197340879635e-05,
      "loss": 0.5736,
      "num_input_tokens_seen": 36175872,
      "step": 23
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.12291638553142548,
      "learning_rate": 3.321355954777087e-05,
      "loss": 0.535,
      "num_input_tokens_seen": 37748736,
      "step": 24
    },
    {
      "epoch": 0.4065040650406504,
      "grad_norm": 0.11071067303419113,
      "learning_rate": 3.1987140871290236e-05,
      "loss": 0.5634,
      "num_input_tokens_seen": 39321600,
      "step": 25
    },
    {
      "epoch": 0.42276422764227645,
      "grad_norm": 0.09884963929653168,
      "learning_rate": 3.07421935532949e-05,
      "loss": 0.5648,
      "num_input_tokens_seen": 40894464,
      "step": 26
    },
    {
      "epoch": 0.43902439024390244,
      "grad_norm": 0.10612273216247559,
      "learning_rate": 2.9482018970268393e-05,
      "loss": 0.5467,
      "num_input_tokens_seen": 42467328,
      "step": 27
    },
    {
      "epoch": 0.45528455284552843,
      "grad_norm": 0.1015135645866394,
      "learning_rate": 2.8209958878663778e-05,
      "loss": 0.5567,
      "num_input_tokens_seen": 44040192,
      "step": 28
    },
    {
      "epoch": 0.4715447154471545,
      "grad_norm": 0.10951631516218185,
      "learning_rate": 2.6929386553166164e-05,
      "loss": 0.5847,
      "num_input_tokens_seen": 45613056,
      "step": 29
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 0.09695735573768616,
      "learning_rate": 2.564369784137472e-05,
      "loss": 0.5429,
      "num_input_tokens_seen": 47185920,
      "step": 30
    },
    {
      "epoch": 0.5040650406504065,
      "grad_norm": 0.09722153842449188,
      "learning_rate": 2.4356302158625288e-05,
      "loss": 0.5435,
      "num_input_tokens_seen": 48758784,
      "step": 31
    },
    {
      "epoch": 0.5203252032520326,
      "grad_norm": 0.0972503125667572,
      "learning_rate": 2.3070613446833842e-05,
      "loss": 0.5482,
      "num_input_tokens_seen": 50331648,
      "step": 32
    },
    {
      "epoch": 0.5365853658536586,
      "grad_norm": 0.09666866064071655,
      "learning_rate": 2.1790041121336225e-05,
      "loss": 0.5418,
      "num_input_tokens_seen": 51904512,
      "step": 33
    },
    {
      "epoch": 0.5528455284552846,
      "grad_norm": 0.09581635892391205,
      "learning_rate": 2.0517981029731616e-05,
      "loss": 0.532,
      "num_input_tokens_seen": 53477376,
      "step": 34
    },
    {
      "epoch": 0.5691056910569106,
      "grad_norm": 0.09590643644332886,
      "learning_rate": 1.9257806446705116e-05,
      "loss": 0.536,
      "num_input_tokens_seen": 55050240,
      "step": 35
    },
    {
      "epoch": 0.5853658536585366,
      "grad_norm": 0.0966959148645401,
      "learning_rate": 1.8012859128709766e-05,
      "loss": 0.5314,
      "num_input_tokens_seen": 56623104,
      "step": 36
    },
    {
      "epoch": 0.6016260162601627,
      "grad_norm": 0.09263308346271515,
      "learning_rate": 1.6786440452229134e-05,
      "loss": 0.5595,
      "num_input_tokens_seen": 58195968,
      "step": 37
    },
    {
      "epoch": 0.6178861788617886,
      "grad_norm": 0.08845654875040054,
      "learning_rate": 1.558180265912037e-05,
      "loss": 0.5418,
      "num_input_tokens_seen": 59768832,
      "step": 38
    },
    {
      "epoch": 0.6341463414634146,
      "grad_norm": 0.0915597528219223,
      "learning_rate": 1.4402140232253486e-05,
      "loss": 0.5438,
      "num_input_tokens_seen": 61341696,
      "step": 39
    },
    {
      "epoch": 0.6504065040650406,
      "grad_norm": 0.08968537300825119,
      "learning_rate": 1.325058142431701e-05,
      "loss": 0.5239,
      "num_input_tokens_seen": 62914560,
      "step": 40
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.0950733870267868,
      "learning_rate": 1.213017996225424e-05,
      "loss": 0.5459,
      "num_input_tokens_seen": 64487424,
      "step": 41
    },
    {
      "epoch": 0.6829268292682927,
      "grad_norm": 0.09233593195676804,
      "learning_rate": 1.1043906949328387e-05,
      "loss": 0.5373,
      "num_input_tokens_seen": 66060288,
      "step": 42
    },
    {
      "epoch": 0.6991869918699187,
      "grad_norm": 0.0939360037446022,
      "learning_rate": 9.994642986290797e-06,
      "loss": 0.5474,
      "num_input_tokens_seen": 67633152,
      "step": 43
    },
    {
      "epoch": 0.7154471544715447,
      "grad_norm": 0.08978823572397232,
      "learning_rate": 8.985170532545622e-06,
      "loss": 0.5236,
      "num_input_tokens_seen": 69206016,
      "step": 44
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 0.09436903893947601,
      "learning_rate": 8.018166527567672e-06,
      "loss": 0.5336,
      "num_input_tokens_seen": 70778880,
      "step": 45
    },
    {
      "epoch": 0.7479674796747967,
      "grad_norm": 0.088426373898983,
      "learning_rate": 7.096195292140173e-06,
      "loss": 0.5198,
      "num_input_tokens_seen": 72351744,
      "step": 46
    },
    {
      "epoch": 0.7642276422764228,
      "grad_norm": 0.09146667271852493,
      "learning_rate": 6.221701728237009e-06,
      "loss": 0.5419,
      "num_input_tokens_seen": 73924608,
      "step": 47
    },
    {
      "epoch": 0.7804878048780488,
      "grad_norm": 0.0876329094171524,
      "learning_rate": 5.397004835582242e-06,
      "loss": 0.5544,
      "num_input_tokens_seen": 75497472,
      "step": 48
    },
    {
      "epoch": 0.7967479674796748,
      "grad_norm": 0.09173890948295593,
      "learning_rate": 4.624291562079719e-06,
      "loss": 0.5689,
      "num_input_tokens_seen": 77070336,
      "step": 49
    },
    {
      "epoch": 0.8130081300813008,
      "grad_norm": 0.09023137390613556,
      "learning_rate": 3.90561100442036e-06,
      "loss": 0.517,
      "num_input_tokens_seen": 78643200,
      "step": 50
    },
    {
      "epoch": 0.8292682926829268,
      "grad_norm": 0.08453888446092606,
      "learning_rate": 3.2428689742461188e-06,
      "loss": 0.544,
      "num_input_tokens_seen": 80216064,
      "step": 51
    },
    {
      "epoch": 0.8455284552845529,
      "grad_norm": 0.09456021338701248,
      "learning_rate": 2.637822944280116e-06,
      "loss": 0.5265,
      "num_input_tokens_seen": 81788928,
      "step": 52
    },
    {
      "epoch": 0.8617886178861789,
      "grad_norm": 0.09165485203266144,
      "learning_rate": 2.092077387824884e-06,
      "loss": 0.5301,
      "num_input_tokens_seen": 83361792,
      "step": 53
    },
    {
      "epoch": 0.8780487804878049,
      "grad_norm": 0.0885196402668953,
      "learning_rate": 1.6070795239876618e-06,
      "loss": 0.5357,
      "num_input_tokens_seen": 84934656,
      "step": 54
    },
    {
      "epoch": 0.8943089430894309,
      "grad_norm": 0.09256651252508163,
      "learning_rate": 1.1841154799154374e-06,
      "loss": 0.522,
      "num_input_tokens_seen": 86507520,
      "step": 55
    },
    {
      "epoch": 0.9105691056910569,
      "grad_norm": 0.0941980704665184,
      "learning_rate": 8.243068802169906e-07,
      "loss": 0.5282,
      "num_input_tokens_seen": 88080384,
      "step": 56
    },
    {
      "epoch": 0.926829268292683,
      "grad_norm": 0.08401526510715485,
      "learning_rate": 5.286078726160549e-07,
      "loss": 0.5327,
      "num_input_tokens_seen": 89653248,
      "step": 57
    },
    {
      "epoch": 0.943089430894309,
      "grad_norm": 0.08636080473661423,
      "learning_rate": 2.978025977230736e-07,
      "loss": 0.5237,
      "num_input_tokens_seen": 91226112,
      "step": 58
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"epoch": 0.959349593495935,
|
| 477 |
+
"grad_norm": 0.09349840134382248,
|
| 478 |
+
"learning_rate": 1.3250310963527358e-07,
|
| 479 |
+
"loss": 0.5499,
|
| 480 |
+
"num_input_tokens_seen": 92798976,
|
| 481 |
+
"step": 59
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"epoch": 0.975609756097561,
|
| 485 |
+
"grad_norm": 0.08632226288318634,
|
| 486 |
+
"learning_rate": 3.314775287923677e-08,
|
| 487 |
+
"loss": 0.5576,
|
| 488 |
+
"num_input_tokens_seen": 94371840,
|
| 489 |
+
"step": 60
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"epoch": 0.991869918699187,
|
| 493 |
+
"grad_norm": 0.09036959707736969,
|
| 494 |
+
"learning_rate": 0.0,
|
| 495 |
+
"loss": 0.5487,
|
| 496 |
+
"num_input_tokens_seen": 95944704,
|
| 497 |
+
"step": 61
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"epoch": 0.991869918699187,
|
| 501 |
+
"num_input_tokens_seen": 95944704,
|
| 502 |
+
"step": 61,
|
| 503 |
+
"total_flos": 4.0703306623541576e+18,
|
| 504 |
+
"train_loss": 0.5785222493234228,
|
| 505 |
+
"train_runtime": 9437.3939,
|
| 506 |
+
"train_samples_per_second": 2.499,
|
| 507 |
+
"train_steps_per_second": 0.006
|
| 508 |
+
}
|
| 509 |
+
],
|
| 510 |
+
"logging_steps": 1,
|
| 511 |
+
"max_steps": 61,
|
| 512 |
+
"num_input_tokens_seen": 95944704,
|
| 513 |
+
"num_train_epochs": 1,
|
| 514 |
+
"save_steps": 1000,
|
| 515 |
+
"stateful_callbacks": {
|
| 516 |
+
"TrainerControl": {
|
| 517 |
+
"args": {
|
| 518 |
+
"should_epoch_stop": false,
|
| 519 |
+
"should_evaluate": false,
|
| 520 |
+
"should_log": false,
|
| 521 |
+
"should_save": true,
|
| 522 |
+
"should_training_stop": true
|
| 523 |
+
},
|
| 524 |
+
"attributes": {}
|
| 525 |
+
}
|
| 526 |
+
},
|
| 527 |
+
"total_flos": 4.0703306623541576e+18,
|
| 528 |
+
"train_batch_size": 16,
|
| 529 |
+
"trial_name": null,
|
| 530 |
+
"trial_params": null
|
| 531 |
+
}
|
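The log entries above advance `num_input_tokens_seen` by exactly 1,572,864 tokens per optimizer step (for example, 32 × 1,572,864 = 50,331,648 at step 32), which matches the run's packed batch geometry: a per-device batch of 16 × 8 gradient-accumulation steps × 3 devices × 4,096-token packed sequences. A minimal sketch that cross-checks this, assuming it runs next to the downloaded `trainer_state.json`:

```python
import json

# Load the trainer state saved alongside this upload.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step entries (the last record is a run summary
# with "train_loss" instead of "loss").
steps = [e for e in state["log_history"] if "loss" in e]

# With packing enabled, every sequence is exactly cutoff_len tokens, so each
# optimizer step consumes batch * grad_accum * devices * cutoff_len tokens.
TOKENS_PER_STEP = 16 * 8 * 3 * 4096  # = 1_572_864

for entry in steps:
    assert entry["num_input_tokens_seen"] == entry["step"] * TOKENS_PER_STEP

print(f"{len(steps)} logged steps, final loss {steps[-1]['loss']:.4f}")
```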
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:444317418b08a04afeda97e23512c9220f7eff61887ad97eed28ba0f22a22c6f
+size 5624
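`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` writes next to its outputs; the LFS pointer above stores only its hash and size. A minimal sketch for inspecting it locally (recent PyTorch defaults to `weights_only=True`, which refuses pickled classes, so it must be disabled here, and only for files you trust):

```python
import torch
from transformers import TrainingArguments

# training_args.bin is a full pickle, not a plain tensor file, so
# weights_only must be turned off for trusted files like this one.
args = torch.load("training_args.bin", weights_only=False)
assert isinstance(args, TrainingArguments)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```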
training_args.yaml
ADDED
@@ -0,0 +1,39 @@
+apollo_rank: 256
+apollo_scale: 1
+apollo_target: all
+apollo_update_interval: 200
+bf16: true
+cutoff_len: 4096
+dataset: codes_nlx_under8
+dataset_dir: data
+ddp_timeout: 180000000
+do_train: true
+enable_liger_kernel: true
+finetuning_type: freeze
+flash_attn: auto
+freeze_trainable_layers: 2
+freeze_trainable_modules: all
+gradient_accumulation_steps: 8
+include_num_input_tokens_seen: true
+learning_rate: 5.0e-05
+logging_steps: 1
+lr_scheduler_type: cosine
+max_grad_norm: 1.0
+max_samples: 50000000
+model_name_or_path: Qwen/Qwen2.5-Coder-7B-Instruct
+neat_packing: true
+num_train_epochs: 1.0
+output_dir: saves/Qwen2.5-Coder-7B-Instruct/freeze/qwen_under8_nlx
+packing: true
+per_device_train_batch_size: 16
+plot_loss: true
+preprocessing_num_workers: 16
+report_to: none
+rope_scaling: llama3
+save_steps: 1000
+stage: sft
+template: qwen
+trust_remote_code: true
+use_apollo: true
+use_llama_pro: true
+warmup_steps: 0
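With `lr_scheduler_type: cosine`, `learning_rate: 5.0e-05`, `warmup_steps: 0`, and the run's 61 optimizer steps (`max_steps` in `trainer_state.json`), the learning rates logged above follow a plain half-cosine decay. A minimal sketch reproducing them under those assumptions:

```python
import math

LR_MAX = 5.0e-05   # learning_rate from this config
MAX_STEPS = 61     # max_steps recorded in trainer_state.json

def cosine_lr(step: int) -> float:
    """Cosine decay to zero with no warmup, as transformers schedules it."""
    return 0.5 * LR_MAX * (1.0 + math.cos(math.pi * step / MAX_STEPS))

print(cosine_lr(32))  # ~2.30706e-05, matching the step-32 log entry
print(cosine_lr(61))  # 0.0, matching the final step
```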
training_loss.png
ADDED
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
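Since the upload contains a complete set of weights, config, and tokenizer files, the result can be consumed directly with `transformers`. A minimal sketch, where `user/qwen_under8_nlx` is a placeholder for the actual repo id:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

REPO = "user/qwen_under8_nlx"  # placeholder: substitute the real repo id

tokenizer = AutoTokenizer.from_pretrained(REPO)
model = AutoModelForCausalLM.from_pretrained(
    REPO, torch_dtype="auto", device_map="auto"  # device_map needs accelerate
)
```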