diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a0a4810e01fec92dc5cb7cf6c57f5e6625463b14
--- /dev/null
+++ b/README.md
@@ -0,0 +1,142 @@
+---
+license: apache-2.0
+base_model: mistralai/Mistral-7B-v0.1
+tags:
+- generated_from_trainer
+model-index:
+- name: out
+ results: []
+---
+
+
+
+[Built with Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)
+
+See axolotl config
+
+axolotl version: `0.3.0`
+```yaml
+base_model: mistralai/Mistral-7B-v0.1
+model_type: MistralForCausalLM
+tokenizer_type: LlamaTokenizer
+is_mistral_derived_model: true
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+datasets:
+ - path: mhenrichsen/alpaca_2k_test
+ type: alpaca
+dataset_prepared_path:
+val_set_size: 0.05
+output_dir: ./out
+
+sequence_len: 8192
+sample_packing: true
+pad_to_sequence_len: true
+eval_sample_packing: false
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.000005
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+eval_table_max_new_tokens: 128
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
+
+```
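+
+With sample packing enabled, one optimizer step on a single GPU covers up to sequence_len × micro_batch_size × gradient_accumulation_steps = 8192 × 2 × 4 = 65,536 packed tokens. A small sketch (not part of the original run) that derives this from the config; it assumes the YAML above is saved locally as `config.yml` and that PyYAML is installed:
+
+```python
+import yaml
+
+# Load the axolotl config shown above (hypothetical local filename).
+with open("config.yml") as f:
+    cfg = yaml.safe_load(f)
+
+micro = cfg["micro_batch_size"]             # 2
+accum = cfg["gradient_accumulation_steps"]  # 4
+seq_len = cfg["sequence_len"]               # 8192
+
+print("effective batch size per GPU:", micro * accum)                # 8
+print("packed tokens per optimizer step:", micro * accum * seq_len)  # 65536
+```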
+
+
+
+# out
+
+This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the [mhenrichsen/alpaca_2k_test](https://huggingface.co/datasets/mhenrichsen/alpaca_2k_test) dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.8633
+
+## Model description
+
+A full-parameter fine-tune of Mistral-7B-v0.1 produced with axolotl 0.3.0: 4 epochs over the mhenrichsen/alpaca_2k_test instruction data, trained with sample packing at a sequence length of 8192 and flash attention.
+
+## Intended uses & limitations
+
+The run fine-tunes on only ~2,000 Alpaca-style examples, so it is best treated as a demonstration of the axolotl full-fine-tune pipeline rather than a production instruction model; it inherits the capabilities and limitations of the Mistral-7B-v0.1 base.
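+
+A minimal inference sketch (not from the original card): it assumes the final weights sit in the `./out` output directory from the config above, that a bfloat16-capable GPU is available, and that prompts follow the Alpaca template the model was tuned on.
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# "./out" is the output_dir from the axolotl config above.
+tok = AutoTokenizer.from_pretrained("./out")
+model = AutoModelForCausalLM.from_pretrained(
+    "./out", torch_dtype=torch.bfloat16, device_map="auto"
+)
+
+# Alpaca-style prompt, matching the dataset type used for fine-tuning.
+prompt = (
+    "Below is an instruction that describes a task. "
+    "Write a response that appropriately completes the request.\n\n"
+    "### Instruction:\nName three primary colors.\n\n### Response:\n"
+)
+inputs = tok(prompt, return_tensors="pt").to(model.device)
+out = model.generate(**inputs, max_new_tokens=128)
+print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
+```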
+
+## Training and evaluation data
+
+Training used the mhenrichsen/alpaca_2k_test dataset in the alpaca prompt format; 5% of it (val_set_size: 0.05) was held out as the evaluation set.
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-06
+- train_batch_size: 2
+- eval_batch_size: 2
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 8 (see the check after this list)
+- optimizer: 8-bit AdamW (`adamw_bnb_8bit` in the config) with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 10
+- num_epochs: 4
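+
+The total train batch size follows directly from the per-device settings; a one-line check (illustrative, not from the original card):
+
+```python
+micro_batch_size, grad_accum = 2, 4
+world_size = 1  # inferred: 8 / (2 * 4) = 1 GPU
+assert micro_batch_size * grad_accum * world_size == 8  # total_train_batch_size
+```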
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 0.9984 | 0.17 | 1 | 1.0934 |
+| 0.9864 | 0.35 | 2 | 1.0603 |
+| 0.9181 | 0.7 | 4 | 0.9132 |
+| 0.8843 | 1.04 | 6 | 0.8623 |
+| 0.8513 | 1.3 | 8 | 0.8310 |
+| 0.7957 | 1.65 | 10 | 0.8248 |
+| 0.7823 | 2.0 | 12 | 0.8221 |
+| 0.5977 | 2.26 | 14 | 0.8459 |
+| 0.5766 | 2.61 | 16 | 0.8648 |
+| 0.546 | 2.96 | 18 | 0.8637 |
+| 0.5024 | 3.22 | 20 | 0.8633 |
+
+
+### Framework versions
+
+- Transformers 4.37.0
+- Pytorch 2.0.1+cu118
+- Datasets 2.16.1
+- Tokenizers 0.15.0
diff --git a/checkpoint-10/config.json b/checkpoint-10/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e63d58534ffc3ffaa37510034b9c34b4a4bf835
--- /dev/null
+++ b/checkpoint-10/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
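
The config above encodes Mistral's grouped-query attention: 32 query heads share 8 key/value heads over a 4096-wide hidden state, with a 4096-token sliding attention window. A quick derivation of the per-head sizes these fields imply (annotation only, not part of the checkpoint):

```python
hidden_size, n_heads, n_kv_heads = 4096, 32, 8  # from config.json above

head_dim = hidden_size // n_heads             # 128
queries_per_kv_head = n_heads // n_kv_heads   # 4 query heads share each KV head
kv_proj_width = n_kv_heads * head_dim         # 1024: output width of k_proj / v_proj

print(head_dim, queries_per_kv_head, kv_proj_width)  # 128 4 1024
```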
diff --git a/checkpoint-10/generation_config.json b/checkpoint-10/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..532693186de3efbebe0ac31b55ddb88f3f226364
--- /dev/null
+++ b/checkpoint-10/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.0"
+}
diff --git a/checkpoint-10/model-00001-of-00004.safetensors b/checkpoint-10/model-00001-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d79dd8b1edd39a159b8130b2ce0a3c56a5abec20
--- /dev/null
+++ b/checkpoint-10/model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:277763661675c081df0cf8784c98981339b13aae2080aa644704ba95a956ffef
+size 4775355104
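
Each multi-gigabyte `.safetensors` shard is committed as a Git LFS pointer: three text lines carrying the spec version, a sha256 object id, and the byte size (about 4.78 GB for this shard); `git lfs pull` replaces the pointer with the real file. A minimal parser for the pointer format (a sketch, not part of the repo):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs spec v1 pointer file into its fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:277763661675c081df0cf8784c98981339b13aae2080aa644704ba95a956ffef\n"
    "size 4775355104\n"
)
print(ptr["size_bytes"] / 1e9)  # ~4.78
```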
diff --git a/checkpoint-10/model-00002-of-00004.safetensors b/checkpoint-10/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d38e080eb085dd28f18f9a7a575b450766f694aa
--- /dev/null
+++ b/checkpoint-10/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:406cee6223d228133371812c03053003e0bfaf7853cc05c6cd4f6da00733a751
+size 4982990072
diff --git a/checkpoint-10/model-00003-of-00004.safetensors b/checkpoint-10/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..068bc985e9148b054e1fee34b123f8df322975b4
--- /dev/null
+++ b/checkpoint-10/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2695ebfa73cf60f45858d42d7b19f472b9f06ae0b059584a920b1c308d05c944
+size 4982990088
diff --git a/checkpoint-10/model-00004-of-00004.safetensors b/checkpoint-10/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b3f4551a10151825aec390e8e0b8d57c602e1c18
--- /dev/null
+++ b/checkpoint-10/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07654ec70a604f3f85dc8f0483d187799eb9cab8ad6f98d4ca66cb36727fbeb9
+size 3500259224
diff --git a/checkpoint-10/model.safetensors.index.json b/checkpoint-10/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b83d48a2949ebf06f421d7198b065d7ee584baf
--- /dev/null
+++ b/checkpoint-10/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 18241560576
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.norm.weight": "model-00004-of-00004.safetensors"
+ }
+}
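
The index file is a flat tensor-name → shard map (note that a boundary layer's tensors can straddle shards: layer 17's MLP lands in shard 3 while its attention projections stayed in shard 2). A sketch of loading one tensor without touching the other shards; it assumes the `safetensors` package is installed and the shards have been pulled from LFS:

```python
import json
from safetensors import safe_open

with open("checkpoint-10/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.17.self_attn.k_proj.weight"
shard = index["weight_map"][name]  # "model-00002-of-00004.safetensors"

# Open only the shard holding this tensor; safetensors reads it lazily.
with safe_open(f"checkpoint-10/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))  # GQA k_proj: (1024, 4096)
```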
diff --git a/checkpoint-10/optimizer.pt b/checkpoint-10/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c9806ac4ef591065b058069d6aa2170dc9b239a4
--- /dev/null
+++ b/checkpoint-10/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e44fc2cac715b96a8616f4cfe6d205fd6655d48100b44bcad3f33aa75e27b6e
+size 14512102791
diff --git a/checkpoint-10/rng_state.pth b/checkpoint-10/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2d80ea84c5f73d441972e03ddda3329bcd15dedb
--- /dev/null
+++ b/checkpoint-10/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dd247f5d8a4bdaa46e9a22dea4b3ff7e3cc6bbd0eca5a0dcd56fe15ceba641a
+size 14575
diff --git a/checkpoint-10/scheduler.pt b/checkpoint-10/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f4659a9420dce5b180a22220372159ed2422a605
--- /dev/null
+++ b/checkpoint-10/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6bea30c714dd9212c63050584655a2d87d875913faaedd2a1cdf189daac1902
+size 627
diff --git a/checkpoint-10/trainer_state.json b/checkpoint-10/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..3196dedf33d7105bb0b2841936674083979e05b7
--- /dev/null
+++ b/checkpoint-10/trainer_state.json
@@ -0,0 +1,129 @@
+{
+ "best_metric": 0.8248077630996704,
+ "best_model_checkpoint": "./out/checkpoint-10",
+ "epoch": 1.6521739130434783,
+ "eval_steps": 2,
+ "global_step": 10,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.17,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9984,
+ "step": 1
+ },
+ {
+ "epoch": 0.17,
+ "eval_loss": 1.09337317943573,
+ "eval_runtime": 65.1601,
+ "eval_samples_per_second": 1.535,
+ "eval_steps_per_second": 0.767,
+ "step": 1
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9864,
+ "step": 2
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.0603257417678833,
+ "eval_runtime": 66.8005,
+ "eval_samples_per_second": 1.497,
+ "eval_steps_per_second": 0.748,
+ "step": 2
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.5e-06,
+ "loss": 0.9692,
+ "step": 3
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 0.9181,
+ "step": 4
+ },
+ {
+ "epoch": 0.7,
+ "eval_loss": 0.9132175445556641,
+ "eval_runtime": 65.6994,
+ "eval_samples_per_second": 1.522,
+ "eval_steps_per_second": 0.761,
+ "step": 4
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 2.5e-06,
+ "loss": 0.9237,
+ "step": 5
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3e-06,
+ "loss": 0.8843,
+ "step": 6
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 0.8623057007789612,
+ "eval_runtime": 64.7101,
+ "eval_samples_per_second": 1.545,
+ "eval_steps_per_second": 0.773,
+ "step": 6
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.5e-06,
+ "loss": 0.8434,
+ "step": 7
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 0.8513,
+ "step": 8
+ },
+ {
+ "epoch": 1.3,
+ "eval_loss": 0.8309622406959534,
+ "eval_runtime": 64.996,
+ "eval_samples_per_second": 1.539,
+ "eval_steps_per_second": 0.769,
+ "step": 8
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 4.5e-06,
+ "loss": 0.8174,
+ "step": 9
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 5e-06,
+ "loss": 0.7957,
+ "step": 10
+ },
+ {
+ "epoch": 1.65,
+ "eval_loss": 0.8248077630996704,
+ "eval_runtime": 65.0362,
+ "eval_samples_per_second": 1.538,
+ "eval_steps_per_second": 0.769,
+ "step": 10
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 20,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 5,
+ "total_flos": 2.796025320308736e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
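
`log_history` interleaves train and eval records; its eval rows are the first half of the README's results table, and `best_metric` marks this checkpoint (step 10, eval loss ≈ 0.8248) as the best so far. A sketch to pull the eval curve back out (annotation only):

```python
import json

with open("checkpoint-10/trainer_state.json") as f:
    state = json.load(f)

evals = [(r["step"], r["eval_loss"]) for r in state["log_history"] if "eval_loss" in r]
print(evals)  # [(1, 1.0934), (2, 1.0603), (4, 0.9132), ..., (10, 0.8248)]
print(state["best_metric"], state["best_model_checkpoint"])
```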
diff --git a/checkpoint-10/training_args.bin b/checkpoint-10/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3b9fc99f7611fc7ae3d5b56c2ade5da036b647e1
--- /dev/null
+++ b/checkpoint-10/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b96db33098f0c4b8328823654626c47a8539e58eff74bcc89a8c6810e5c11e7
+size 4795
diff --git a/checkpoint-15/config.json b/checkpoint-15/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e63d58534ffc3ffaa37510034b9c34b4a4bf835
--- /dev/null
+++ b/checkpoint-15/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/checkpoint-15/generation_config.json b/checkpoint-15/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..532693186de3efbebe0ac31b55ddb88f3f226364
--- /dev/null
+++ b/checkpoint-15/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.0"
+}
diff --git a/checkpoint-15/model-00001-of-00004.safetensors b/checkpoint-15/model-00001-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f3e60bd6555266e9304cd5b8f7a78538ab69d923
--- /dev/null
+++ b/checkpoint-15/model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b831e96c8e65c5e4f8389b42790d82b1969d5814e26e0c3636bd0a08edfae5d
+size 4775355104
diff --git a/checkpoint-15/model-00002-of-00004.safetensors b/checkpoint-15/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d7ae407b772ca4e02f298158a99e802acc57966d
--- /dev/null
+++ b/checkpoint-15/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b9e4ebcc65cea1d71255bfc575836760eba69ca848f736b557f9fde38d75e2a
+size 4982990072
diff --git a/checkpoint-15/model-00003-of-00004.safetensors b/checkpoint-15/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..65904e78c6b8d88ef64d5312f00ad9f54b23524b
--- /dev/null
+++ b/checkpoint-15/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bca7eb6e38844d14427a6888b43637e206d45039d110a2f40ccfcfb302ec6ac
+size 4982990088
diff --git a/checkpoint-15/model-00004-of-00004.safetensors b/checkpoint-15/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e62cb15dedf355f6f1dd813d4d48db0827d9973e
--- /dev/null
+++ b/checkpoint-15/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcee218a9f0f5da641c31bdb69580bfed09d262381ff08ed71b8231058450598
+size 3500259224
diff --git a/checkpoint-15/model.safetensors.index.json b/checkpoint-15/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b83d48a2949ebf06f421d7198b065d7ee584baf
--- /dev/null
+++ b/checkpoint-15/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 18241560576
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.norm.weight": "model-00004-of-00004.safetensors"
+ }
+}
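
The `weight_map` above is how a loader finds each parameter: name → shard file. A minimal sketch of that resolution step, assuming the standard `safetensors` Python API (this mirrors what `transformers` does internally for sharded checkpoints; the path and tensor name below are just examples from this repo):

```python
import json
from pathlib import Path
from safetensors import safe_open

def load_tensor(checkpoint_dir: str, name: str):
    """Look a parameter name up in model.safetensors.index.json and read
    it from the shard that the weight_map points at."""
    ckpt = Path(checkpoint_dir)
    index = json.loads((ckpt / "model.safetensors.index.json").read_text())
    shard = index["weight_map"][name]  # e.g. "model-00002-of-00004.safetensors"
    with safe_open(str(ckpt / shard), framework="pt") as f:
        return f.get_tensor(name)

# w = load_tensor("checkpoint-15", "model.layers.8.mlp.down_proj.weight")
```

Note that shards split mid-layer at the size boundary: layer 8's `self_attn` tensors live in shard 1 while its MLP tensors live in shard 2, so loading one layer can touch two files.
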
diff --git a/checkpoint-15/optimizer.pt b/checkpoint-15/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1c5622fee4ef45d1cff7c4695795ffb1bfbafd88
--- /dev/null
+++ b/checkpoint-15/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aff9265cbb3809facf1df0c4cf15abdcb92db4a5bde6bbf4899152b8caad05ce
+size 14512102791
diff --git a/checkpoint-15/rng_state.pth b/checkpoint-15/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2cdb9b6f14132b9cae3da20544139e42f14812c0
--- /dev/null
+++ b/checkpoint-15/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8400c928f00bba2be3b202ea8da8ba62c6a19dbbf1f76dbc77e0305cb22a1fdf
+size 14575
diff --git a/checkpoint-15/scheduler.pt b/checkpoint-15/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..80d8fcfda758e630f62ba6a9b9ca19de636d107c
--- /dev/null
+++ b/checkpoint-15/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5298287830406f40471aa4c35c89830a3b9baac97776b5a0c824f467ea070379
+size 627
diff --git a/checkpoint-15/trainer_state.json b/checkpoint-15/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d69c74a17f51fb274c4204f1944208b148e99c4
--- /dev/null
+++ b/checkpoint-15/trainer_state.json
@@ -0,0 +1,175 @@
+{
+ "best_metric": 0.8248077630996704,
+ "best_model_checkpoint": "./out/checkpoint-10",
+ "epoch": 2.4347826086956523,
+ "eval_steps": 2,
+ "global_step": 15,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.17,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9984,
+ "step": 1
+ },
+ {
+ "epoch": 0.17,
+ "eval_loss": 1.09337317943573,
+ "eval_runtime": 65.1601,
+ "eval_samples_per_second": 1.535,
+ "eval_steps_per_second": 0.767,
+ "step": 1
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9864,
+ "step": 2
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.0603257417678833,
+ "eval_runtime": 66.8005,
+ "eval_samples_per_second": 1.497,
+ "eval_steps_per_second": 0.748,
+ "step": 2
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.5e-06,
+ "loss": 0.9692,
+ "step": 3
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 0.9181,
+ "step": 4
+ },
+ {
+ "epoch": 0.7,
+ "eval_loss": 0.9132175445556641,
+ "eval_runtime": 65.6994,
+ "eval_samples_per_second": 1.522,
+ "eval_steps_per_second": 0.761,
+ "step": 4
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 2.5e-06,
+ "loss": 0.9237,
+ "step": 5
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3e-06,
+ "loss": 0.8843,
+ "step": 6
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 0.8623057007789612,
+ "eval_runtime": 64.7101,
+ "eval_samples_per_second": 1.545,
+ "eval_steps_per_second": 0.773,
+ "step": 6
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.5e-06,
+ "loss": 0.8434,
+ "step": 7
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 0.8513,
+ "step": 8
+ },
+ {
+ "epoch": 1.3,
+ "eval_loss": 0.8309622406959534,
+ "eval_runtime": 64.996,
+ "eval_samples_per_second": 1.539,
+ "eval_steps_per_second": 0.769,
+ "step": 8
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 4.5e-06,
+ "loss": 0.8174,
+ "step": 9
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 5e-06,
+ "loss": 0.7957,
+ "step": 10
+ },
+ {
+ "epoch": 1.65,
+ "eval_loss": 0.8248077630996704,
+ "eval_runtime": 65.0362,
+ "eval_samples_per_second": 1.538,
+ "eval_steps_per_second": 0.769,
+ "step": 10
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 4.8776412907378845e-06,
+ "loss": 0.7513,
+ "step": 11
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 4.522542485937369e-06,
+ "loss": 0.7823,
+ "step": 12
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.822129487991333,
+ "eval_runtime": 64.6722,
+ "eval_samples_per_second": 1.546,
+ "eval_steps_per_second": 0.773,
+ "step": 12
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 3.969463130731183e-06,
+ "loss": 0.681,
+ "step": 13
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 3.272542485937369e-06,
+ "loss": 0.5977,
+ "step": 14
+ },
+ {
+ "epoch": 2.26,
+ "eval_loss": 0.845899224281311,
+ "eval_runtime": 64.9298,
+ "eval_samples_per_second": 1.54,
+ "eval_steps_per_second": 0.77,
+ "step": 14
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 2.5e-06,
+ "loss": 0.5913,
+ "step": 15
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 20,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 5,
+ "total_flos": 4.194037980463104e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
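
The `learning_rate` values in `log_history` trace linear warmup over `warmup_steps: 10` up to 5e-6, then cosine decay to zero across the remaining 10 of `max_steps: 20`. A small sketch, assuming the standard `get_cosine_schedule_with_warmup` formula from `transformers` (num_cycles=0.5), reproduces the logged values exactly:

```python
import math

BASE_LR, WARMUP_STEPS, MAX_STEPS = 5e-6, 10, 20  # from the config / trainer state

def lr_at(step: int) -> float:
    """Linear warmup then cosine decay to zero."""
    if step < WARMUP_STEPS:
        return BASE_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (MAX_STEPS - WARMUP_STEPS)
    return BASE_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

assert abs(lr_at(1) - 5.000000000000001e-07) < 1e-12    # step 1 in log_history
assert abs(lr_at(11) - 4.8776412907378845e-06) < 1e-12  # step 11 in log_history
```
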
diff --git a/checkpoint-15/training_args.bin b/checkpoint-15/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3b9fc99f7611fc7ae3d5b56c2ade5da036b647e1
--- /dev/null
+++ b/checkpoint-15/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b96db33098f0c4b8328823654626c47a8539e58eff74bcc89a8c6810e5c11e7
+size 4795
diff --git a/checkpoint-20/config.json b/checkpoint-20/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e63d58534ffc3ffaa37510034b9c34b4a4bf835
--- /dev/null
+++ b/checkpoint-20/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
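
A quick sanity derivation from the config fields above (plain arithmetic, assuming the standard Mistral grouped-query attention layout): 32 attention heads share 8 KV heads, which is why the `k_proj`/`v_proj` tensors are a quarter the size of `q_proj`/`o_proj`.

```python
hidden_size, n_heads, n_kv_heads = 4096, 32, 8  # values from config.json above

head_dim = hidden_size // n_heads   # 128
group = n_heads // n_kv_heads       # 4 query heads share each KV head (GQA)
kv_width = n_kv_heads * head_dim    # 1024: output width of k_proj / v_proj

# q_proj and o_proj map 4096 -> 4096, k_proj/v_proj map 4096 -> 1024,
# shrinking the KV cache by the same 4x factor.
assert (head_dim, group, kv_width) == (128, 4, 1024)
```
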
diff --git a/checkpoint-20/generation_config.json b/checkpoint-20/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..532693186de3efbebe0ac31b55ddb88f3f226364
--- /dev/null
+++ b/checkpoint-20/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.0"
+}
diff --git a/checkpoint-20/model-00001-of-00004.safetensors b/checkpoint-20/model-00001-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..eefe8a0a420813e02c39bf712d430a0a17a192d5
--- /dev/null
+++ b/checkpoint-20/model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01f7bdd06f92fb2f2f946b6e189eb6b32471fe624cd3281a4a6ab7026f5da600
+size 4775355104
diff --git a/checkpoint-20/model-00002-of-00004.safetensors b/checkpoint-20/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2dec86be30f327bdac834c94f3a63c9c5516ccb1
--- /dev/null
+++ b/checkpoint-20/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:659a9f3d0e5fee2665935a7e2541294247df0462f9045d9e525df8aa6e2b02c8
+size 4982990072
diff --git a/checkpoint-20/model-00003-of-00004.safetensors b/checkpoint-20/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..de21223df8c915ded04aa39436e357f99fa567e6
--- /dev/null
+++ b/checkpoint-20/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92881b64802651ae36ad3bb667207f6890f6d228136c80e3cb2556556a0cdf01
+size 4982990088
diff --git a/checkpoint-20/model-00004-of-00004.safetensors b/checkpoint-20/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c3b1faf6c596cbdaba432ce91a00ee5bba814c57
--- /dev/null
+++ b/checkpoint-20/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eebd8b2d824a50a78d15bf80426f65a6ff3394955fe99bdf948210e0dd0b4660
+size 3500259224
diff --git a/checkpoint-20/model.safetensors.index.json b/checkpoint-20/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b83d48a2949ebf06f421d7198b065d7ee584baf
--- /dev/null
+++ b/checkpoint-20/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 18241560576
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.norm.weight": "model-00004-of-00004.safetensors"
+ }
+}
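
The `metadata.total_size` above (18,241,560,576 bytes) counts tensor bytes only; the four shard files recorded in the LFS pointers sum to slightly more, consistent with each shard carrying its own safetensors JSON header. A consistency-check sketch over the numbers in this diff:

```python
# Shard sizes as recorded in the LFS pointers for checkpoint-20.
shards = {
    "model-00001-of-00004.safetensors": 4775355104,
    "model-00002-of-00004.safetensors": 4982990072,
    "model-00003-of-00004.safetensors": 4982990088,
    "model-00004-of-00004.safetensors": 3500259224,
}
total_size = 18241560576  # metadata.total_size from the index above

# The gap between file bytes and tensor bytes is header overhead, not data.
overhead = sum(shards.values()) - total_size
assert overhead == 33912  # ~8.5 KB of safetensors header per shard
assert set(shards) == {f"model-{i:05d}-of-00004.safetensors" for i in range(1, 5)}
```
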
diff --git a/checkpoint-20/optimizer.pt b/checkpoint-20/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..462df7084b5f1800613fa25536d176397ad9660e
--- /dev/null
+++ b/checkpoint-20/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:020804881feaa7d20c637438fc81e02f255c02bf6fe1c96fb8490b67f6e8be9e
+size 14512102791
diff --git a/checkpoint-20/rng_state.pth b/checkpoint-20/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6e3558349a8c95107f4b2676c261554ac5f22b76
--- /dev/null
+++ b/checkpoint-20/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:181e781485251674b4c51aee92a3dd818c77e85e6a3bd08df4a20977136eb52d
+size 14575
diff --git a/checkpoint-20/scheduler.pt b/checkpoint-20/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ef1dbb7a480009e51b2070a25cc81100e3f92104
--- /dev/null
+++ b/checkpoint-20/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:843bc578b27264f5f381585143d742735d5c6aa95ab86d05ee4eb405d698e4b6
+size 627
diff --git a/checkpoint-20/trainer_state.json b/checkpoint-20/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..5f21478ce5b798ace8db0977ba322db89de9eb3c
--- /dev/null
+++ b/checkpoint-20/trainer_state.json
@@ -0,0 +1,229 @@
+{
+ "best_metric": 0.8248077630996704,
+ "best_model_checkpoint": "./out/checkpoint-10",
+ "epoch": 3.217391304347826,
+ "eval_steps": 2,
+ "global_step": 20,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.17,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9984,
+ "step": 1
+ },
+ {
+ "epoch": 0.17,
+ "eval_loss": 1.09337317943573,
+ "eval_runtime": 65.1601,
+ "eval_samples_per_second": 1.535,
+ "eval_steps_per_second": 0.767,
+ "step": 1
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9864,
+ "step": 2
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.0603257417678833,
+ "eval_runtime": 66.8005,
+ "eval_samples_per_second": 1.497,
+ "eval_steps_per_second": 0.748,
+ "step": 2
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.5e-06,
+ "loss": 0.9692,
+ "step": 3
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 0.9181,
+ "step": 4
+ },
+ {
+ "epoch": 0.7,
+ "eval_loss": 0.9132175445556641,
+ "eval_runtime": 65.6994,
+ "eval_samples_per_second": 1.522,
+ "eval_steps_per_second": 0.761,
+ "step": 4
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 2.5e-06,
+ "loss": 0.9237,
+ "step": 5
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3e-06,
+ "loss": 0.8843,
+ "step": 6
+ },
+ {
+ "epoch": 1.04,
+ "eval_loss": 0.8623057007789612,
+ "eval_runtime": 64.7101,
+ "eval_samples_per_second": 1.545,
+ "eval_steps_per_second": 0.773,
+ "step": 6
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.5e-06,
+ "loss": 0.8434,
+ "step": 7
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 0.8513,
+ "step": 8
+ },
+ {
+ "epoch": 1.3,
+ "eval_loss": 0.8309622406959534,
+ "eval_runtime": 64.996,
+ "eval_samples_per_second": 1.539,
+ "eval_steps_per_second": 0.769,
+ "step": 8
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 4.5e-06,
+ "loss": 0.8174,
+ "step": 9
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 5e-06,
+ "loss": 0.7957,
+ "step": 10
+ },
+ {
+ "epoch": 1.65,
+ "eval_loss": 0.8248077630996704,
+ "eval_runtime": 65.0362,
+ "eval_samples_per_second": 1.538,
+ "eval_steps_per_second": 0.769,
+ "step": 10
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 4.8776412907378845e-06,
+ "loss": 0.7513,
+ "step": 11
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 4.522542485937369e-06,
+ "loss": 0.7823,
+ "step": 12
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.822129487991333,
+ "eval_runtime": 64.6722,
+ "eval_samples_per_second": 1.546,
+ "eval_steps_per_second": 0.773,
+ "step": 12
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 3.969463130731183e-06,
+ "loss": 0.681,
+ "step": 13
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 3.272542485937369e-06,
+ "loss": 0.5977,
+ "step": 14
+ },
+ {
+ "epoch": 2.26,
+ "eval_loss": 0.845899224281311,
+ "eval_runtime": 64.9298,
+ "eval_samples_per_second": 1.54,
+ "eval_steps_per_second": 0.77,
+ "step": 14
+ },
+ {
+ "epoch": 2.43,
+ "learning_rate": 2.5e-06,
+ "loss": 0.5913,
+ "step": 15
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 1.7274575140626318e-06,
+ "loss": 0.5766,
+ "step": 16
+ },
+ {
+ "epoch": 2.61,
+ "eval_loss": 0.8647727966308594,
+ "eval_runtime": 64.5733,
+ "eval_samples_per_second": 1.549,
+ "eval_steps_per_second": 0.774,
+ "step": 16
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 1.0305368692688175e-06,
+ "loss": 0.5096,
+ "step": 17
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 4.774575140626317e-07,
+ "loss": 0.546,
+ "step": 18
+ },
+ {
+ "epoch": 2.96,
+ "eval_loss": 0.863722026348114,
+ "eval_runtime": 64.9313,
+ "eval_samples_per_second": 1.54,
+ "eval_steps_per_second": 0.77,
+ "step": 18
+ },
+ {
+ "epoch": 3.04,
+ "learning_rate": 1.223587092621162e-07,
+ "loss": 0.524,
+ "step": 19
+ },
+ {
+ "epoch": 3.22,
+ "learning_rate": 0.0,
+ "loss": 0.5024,
+ "step": 20
+ },
+ {
+ "epoch": 3.22,
+ "eval_loss": 0.863263726234436,
+ "eval_runtime": 65.767,
+ "eval_samples_per_second": 1.521,
+ "eval_steps_per_second": 0.76,
+ "step": 20
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 20,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 5,
+ "total_flos": 5.592050640617472e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
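
One detail worth noticing in this final state: the lowest eval loss in `log_history` is 0.8221 at step 12, yet `best_metric`/`best_model_checkpoint` record 0.8248 at checkpoint-10. This is consistent with the Trainer only refreshing the "best" bookkeeping when it actually writes a checkpoint (every `save_steps: 5` steps here), while evals run every 2 steps, so the step-12 eval is never captured. A sketch reading this file back:

```python
import json

state = json.load(open("checkpoint-20/trainer_state.json"))
evals = {e["step"]: e["eval_loss"] for e in state["log_history"] if "eval_loss" in e}

print(min(evals.values()))  # 0.8221... at step 12 -- the raw minimum

# Restricting to save steps (multiples of save_steps=5 that were evaluated)
# recovers the recorded best: checkpoint-10 at 0.8248.
saved = [s for s in evals if s % state["save_steps"] == 0]
best = min(saved, key=evals.get)
assert best == 10 and evals[best] == state["best_metric"]
```
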
diff --git a/checkpoint-20/training_args.bin b/checkpoint-20/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3b9fc99f7611fc7ae3d5b56c2ade5da036b647e1
--- /dev/null
+++ b/checkpoint-20/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b96db33098f0c4b8328823654626c47a8539e58eff74bcc89a8c6810e5c11e7
+size 4795
diff --git a/checkpoint-5/config.json b/checkpoint-5/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e63d58534ffc3ffaa37510034b9c34b4a4bf835
--- /dev/null
+++ b/checkpoint-5/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/checkpoint-5/generation_config.json b/checkpoint-5/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..532693186de3efbebe0ac31b55ddb88f3f226364
--- /dev/null
+++ b/checkpoint-5/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.0"
+}
diff --git a/checkpoint-5/model-00001-of-00004.safetensors b/checkpoint-5/model-00001-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b9e8ffa124ff7caa800ced7c1b57ad7669e0d3d7
--- /dev/null
+++ b/checkpoint-5/model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b7b47b367050d0999833de008e6e7ca45d4a64bb5abab4ff1521581c6cb8739
+size 4775355104
diff --git a/checkpoint-5/model-00002-of-00004.safetensors b/checkpoint-5/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..483fd2d5c84f64f33d34bffc605301949a354145
--- /dev/null
+++ b/checkpoint-5/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f37b8b76a3a27a5c193383e8c3aa5a7061c649e37b7fc8c7328cb383129b505
+size 4982990072
diff --git a/checkpoint-5/model-00003-of-00004.safetensors b/checkpoint-5/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..de97076c7d35d5d073e5ca26d30dbc604033523e
--- /dev/null
+++ b/checkpoint-5/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0513e64f6c87a0575c66e6cd4bd1dab5a24b34fb9a8a82af2a5fc8b0ec0e211
+size 4982990088
diff --git a/checkpoint-5/model-00004-of-00004.safetensors b/checkpoint-5/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..abfa247c94bb68c2a423331d50cc3f81d7286704
--- /dev/null
+++ b/checkpoint-5/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f6af91bd63ec3724bb27612e90c3650e4cefb895174bddb939c956ba091468f
+size 3500259224
diff --git a/checkpoint-5/model.safetensors.index.json b/checkpoint-5/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b83d48a2949ebf06f421d7198b065d7ee584baf
--- /dev/null
+++ b/checkpoint-5/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 18241560576
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.norm.weight": "model-00004-of-00004.safetensors"
+ }
+}
diff --git a/checkpoint-5/optimizer.pt b/checkpoint-5/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..615cc31ab29872f8d77480440d5f509d16e9f0c5
--- /dev/null
+++ b/checkpoint-5/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23db6b36bd0375437f1bdfa6aea5261a8dc2af3e550ea621ca5cc087f0f9ea10
+size 14512102791
diff --git a/checkpoint-5/rng_state.pth b/checkpoint-5/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..452f99ac534b9117d836494d73222e3d44e1523b
--- /dev/null
+++ b/checkpoint-5/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6869750f95a25c4e970298a33adf90e2d7ab52680bf3317239bff1b10103235
+size 14575
diff --git a/checkpoint-5/scheduler.pt b/checkpoint-5/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..81f6026470f8b7148d2d0e2bc7d9e2e23b0b83a6
--- /dev/null
+++ b/checkpoint-5/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e69e510ee9d0fe3aaea07594bc17ba236c2a87c5fa93a8fd543357fd4197812
+size 627
diff --git a/checkpoint-5/trainer_state.json b/checkpoint-5/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..c7617d63fde24918a437f46f0cf54d15a4c16fa4
--- /dev/null
+++ b/checkpoint-5/trainer_state.json
@@ -0,0 +1,75 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.8695652173913043,
+ "eval_steps": 2,
+ "global_step": 5,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.17,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 0.9984,
+ "step": 1
+ },
+ {
+ "epoch": 0.17,
+ "eval_loss": 1.09337317943573,
+ "eval_runtime": 65.1601,
+ "eval_samples_per_second": 1.535,
+ "eval_steps_per_second": 0.767,
+ "step": 1
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 1.0000000000000002e-06,
+ "loss": 0.9864,
+ "step": 2
+ },
+ {
+ "epoch": 0.35,
+ "eval_loss": 1.0603257417678833,
+ "eval_runtime": 66.8005,
+ "eval_samples_per_second": 1.497,
+ "eval_steps_per_second": 0.748,
+ "step": 2
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 1.5e-06,
+ "loss": 0.9692,
+ "step": 3
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 2.0000000000000003e-06,
+ "loss": 0.9181,
+ "step": 4
+ },
+ {
+ "epoch": 0.7,
+ "eval_loss": 0.9132175445556641,
+ "eval_runtime": 65.6994,
+ "eval_samples_per_second": 1.522,
+ "eval_steps_per_second": 0.761,
+ "step": 4
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 2.5e-06,
+ "loss": 0.9237,
+ "step": 5
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 20,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 5,
+ "total_flos": 1.398012660154368e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-5/training_args.bin b/checkpoint-5/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..3b9fc99f7611fc7ae3d5b56c2ade5da036b647e1
--- /dev/null
+++ b/checkpoint-5/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b96db33098f0c4b8328823654626c47a8539e58eff74bcc89a8c6810e5c11e7
+size 4795
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e63d58534ffc3ffaa37510034b9c34b4a4bf835
--- /dev/null
+++ b/config.json
@@ -0,0 +1,26 @@
+{
+ "_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "architectures": [
+ "MistralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "max_position_embeddings": 32768,
+ "model_type": "mistral",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 10000.0,
+ "sliding_window": 4096,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.37.0",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/generation_config.json b/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..532693186de3efbebe0ac31b55ddb88f3f226364
--- /dev/null
+++ b/generation_config.json
@@ -0,0 +1,6 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.37.0"
+}
diff --git a/pytorch_model-00001-of-00004.bin b/pytorch_model-00001-of-00004.bin
new file mode 100644
index 0000000000000000000000000000000000000000..30e658e828a5af0811b19de15b30468b12b73c44
--- /dev/null
+++ b/pytorch_model-00001-of-00004.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dfcebdd140c22f506735f4244a57ed737dfedcb310a267157ee0886bbab8662
+size 4775373201
diff --git a/pytorch_model-00002-of-00004.bin b/pytorch_model-00002-of-00004.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b2a16e80c739c56d169521cd00b9d075868df59d
--- /dev/null
+++ b/pytorch_model-00002-of-00004.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d70f4a7ab70643e0ec7613381255add228cd4728cf9c67805d2f804a1a47c8b6
+size 4983009101
diff --git a/pytorch_model-00003-of-00004.bin b/pytorch_model-00003-of-00004.bin
new file mode 100644
index 0000000000000000000000000000000000000000..46e9d99309cbcc4aa764e2af5d3f525ddcdc19e1
--- /dev/null
+++ b/pytorch_model-00003-of-00004.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53004df913957614f09ae90273ce4d5d8db76b30ddc7e6a420ea7054af69621e
+size 4983009101
diff --git a/pytorch_model-00004-of-00004.bin b/pytorch_model-00004-of-00004.bin
new file mode 100644
index 0000000000000000000000000000000000000000..5c15d2dedb5b3faf86a0f80589da405a6372472f
--- /dev/null
+++ b/pytorch_model-00004-of-00004.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f2d46efde5890ed2ab062ab6f75ac62bf877f3e025791d50355c96cde3dd9db
+size 3500271287
diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..93d895e8f8ddb4e1f908849f372f43733ada9fe4
--- /dev/null
+++ b/pytorch_model.bin.index.json
@@ -0,0 +1,298 @@
+{
+ "metadata": {
+ "total_size": 18241560576
+ },
+ "weight_map": {
+ "lm_head.weight": "pytorch_model-00004-of-00004.bin",
+ "model.embed_tokens.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.10.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00003-of-00004.bin",
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00004-of-00004.bin",
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00004.bin",
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00004.bin",
+ "model.norm.weight": "pytorch_model-00004-of-00004.bin"
+ }
+}
diff --git a/runs/Jan23_10-55-23_64a42648ad36/events.out.tfevents.1706007327.64a42648ad36.9858.0 b/runs/Jan23_10-55-23_64a42648ad36/events.out.tfevents.1706007327.64a42648ad36.9858.0
new file mode 100644
index 0000000000000000000000000000000000000000..21c7c05ea64758ed8f7646e814f9c87d34e47e9d
--- /dev/null
+++ b/runs/Jan23_10-55-23_64a42648ad36/events.out.tfevents.1706007327.64a42648ad36.9858.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8d63500f7b9f0abff6ca7888cf62da3566c6a3140d365eff8e1d26752f390af
+size 11251
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3b16a1f5e8af67a396150f3a1dbc4e53466afa38
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "trust_remote_code": false,
+ "unk_token": "",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}