diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a7873f84dfbc90de6f6ce83f3b9fc02e750f046
--- /dev/null
+++ b/README.md
@@ -0,0 +1,143 @@
+---
+license: apache-2.0
+library_name: peft
+tags:
+- generated_from_trainer
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+model-index:
+- name: outputs/qlora-out
+ results: []
+---
+
+
+
+[Built with Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)
+
+See axolotl config:
+
+axolotl version: `0.4.1`
+```yaml
+adapter: qlora
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+bf16: false
+dataset_prepared_path: null
+datasets:
+- ds_type: json
+ path: pubmed_continual_pretraning_dataset.jsonl
+ type: completion
+debug: null
+deepspeed: null
+early_stopping_patience: null
+eval_sample_packing: false
+evals_per_epoch: 4
+flash_attention: false
+fp16: null
+fsdp: null
+fsdp_config: null
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+group_by_length: false
+learning_rate: 0.0002
+load_in_4bit: true
+load_in_8bit: false
+local_rank: null
+logging_steps: 1
+lora_alpha: 16
+lora_dropout: 0.05
+lora_fan_in_fan_out: null
+lora_model_dir: null
+lora_r: 32
+lora_target_linear: true
+lora_target_modules: null
+lr_scheduler: cosine
+micro_batch_size: 8
+model_type: LlamaForCausalLM
+num_epochs: 4
+optimizer: paged_adamw_32bit
+output_dir: ./outputs/qlora-out
+pad_to_sequence_len: false
+resume_from_checkpoint: null
+sample_packing: false
+saves_per_epoch: 1
+sequence_len: 4096
+special_tokens: null
+strict: false
+tf32: false
+tokenizer_type: LlamaTokenizer
+train_on_inputs: false
+val_set_size: 0.05
+wandb_entity: null
+wandb_log_model: null
+wandb_name: null
+wandb_project: null
+wandb_watch: null
+warmup_steps: 10
+weight_decay: 0.0
+xformers_attention: null
+
+```
+
+
+
+# outputs/qlora-out
+
+This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the `pubmed_continual_pretraning_dataset.jsonl` dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.7613
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0002
+- train_batch_size: 8
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 10
+- num_epochs: 4
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 1.768 | 0.0336 | 1 | 1.8649 |
+| 1.8084 | 0.2689 | 8 | 1.8317 |
+| 1.633 | 0.5378 | 16 | 1.7833 |
+| 1.6737 | 0.8067 | 24 | 1.7644 |
+| 1.6722 | 1.0756 | 32 | 1.7601 |
+| 1.7162 | 1.3445 | 40 | 1.7571 |
+| 1.7046 | 1.6134 | 48 | 1.7558 |
+| 1.6714 | 1.8824 | 56 | 1.7564 |
+| 1.6249 | 2.1513 | 64 | 1.7566 |
+| 1.5604 | 2.4202 | 72 | 1.7599 |
+| 1.7003 | 2.6891 | 80 | 1.7614 |
+| 1.7115 | 2.9580 | 88 | 1.7605 |
+| 1.5937 | 3.2269 | 96 | 1.7609 |
+| 1.655 | 3.4958 | 104 | 1.7612 |
+| 1.5829 | 3.7647 | 112 | 1.7613 |
+
+
+### Framework versions
+
+- PEFT 0.11.1
+- Transformers 4.41.1
+- Pytorch 2.1.2+cu121
+- Datasets 2.19.1
+- Tokenizers 0.19.1
\ No newline at end of file
diff --git a/adapter_config.json b/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/adapter_model.bin b/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8ed705314a209e3238104543c20f116844bb81be
--- /dev/null
+++ b/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a07fceb0f4a213b192376861226c495015857f3a4db9a7662c58acf847cac9b7
+size 50573978
diff --git a/checkpoint-116/README.md b/checkpoint-116/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/checkpoint-116/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/checkpoint-116/adapter_config.json b/checkpoint-116/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/checkpoint-116/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-116/adapter_model.safetensors b/checkpoint-116/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c2f8ae14c1c003567ecce01c661cf0c25ebf0913
--- /dev/null
+++ b/checkpoint-116/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8668d68993cbafac06993db9d1c83bc618398531aab80fdab4198b3b92fe2ccb
+size 50503848
diff --git a/checkpoint-116/optimizer.pt b/checkpoint-116/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8a196c686c309683333cfbd09c94e815862cb537
--- /dev/null
+++ b/checkpoint-116/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13f111d1655d1c0c5f946c1a507f8decff70637f9ab53f6b1f6bc534501332c0
+size 202035450
diff --git a/checkpoint-116/rng_state.pth b/checkpoint-116/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e056ab6a565d11467a679306a9742ecdd4b928da
--- /dev/null
+++ b/checkpoint-116/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edbf3bea1d4f693a677a0bf33551e5e3063cbd96faf6ce6d46f9d3b4ff62532d
+size 14244
diff --git a/checkpoint-116/scheduler.pt b/checkpoint-116/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f09aa9eb766b3356ec7e327795e67e35471f1984
--- /dev/null
+++ b/checkpoint-116/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68424c30349bda77a4657d1902f52c5a348248ab657a58e4aee2ebfc370c25ab
+size 1064
diff --git a/checkpoint-116/special_tokens_map.json b/checkpoint-116/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-116/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-116/tokenizer.model b/checkpoint-116/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-116/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-116/tokenizer_config.json b/checkpoint-116/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/checkpoint-116/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-116/trainer_state.json b/checkpoint-116/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e65f658dc6fd56e9f30910877602ef09385680e
--- /dev/null
+++ b/checkpoint-116/trainer_state.json
@@ -0,0 +1,965 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.899159663865546,
+ "eval_steps": 8,
+ "global_step": 116,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ },
+ {
+ "epoch": 1.0084033613445378,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001829405685450202,
+ "loss": 1.7947,
+ "step": 30
+ },
+ {
+ "epoch": 1.0420168067226891,
+ "grad_norm": 0.20703125,
+ "learning_rate": 0.00018124868780056814,
+ "loss": 1.6887,
+ "step": 31
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 0.00017948544414133534,
+ "loss": 1.6722,
+ "step": 32
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "eval_loss": 1.7600828409194946,
+ "eval_runtime": 19.7105,
+ "eval_samples_per_second": 2.537,
+ "eval_steps_per_second": 0.355,
+ "step": 32
+ },
+ {
+ "epoch": 1.1092436974789917,
+ "grad_norm": 0.09814453125,
+ "learning_rate": 0.00017765238627180424,
+ "loss": 1.7145,
+ "step": 33
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00017575112421616202,
+ "loss": 1.6609,
+ "step": 34
+ },
+ {
+ "epoch": 1.1764705882352942,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00017378332790417273,
+ "loss": 1.6681,
+ "step": 35
+ },
+ {
+ "epoch": 1.2100840336134453,
+ "grad_norm": 0.11767578125,
+ "learning_rate": 0.00017175072570443312,
+ "loss": 1.6641,
+ "step": 36
+ },
+ {
+ "epoch": 1.2436974789915967,
+ "grad_norm": 0.11376953125,
+ "learning_rate": 0.00016965510290629972,
+ "loss": 1.7011,
+ "step": 37
+ },
+ {
+ "epoch": 1.2773109243697478,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00016749830015182107,
+ "loss": 1.7171,
+ "step": 38
+ },
+ {
+ "epoch": 1.3109243697478992,
+ "grad_norm": 0.103515625,
+ "learning_rate": 0.00016528221181905217,
+ "loss": 1.6333,
+ "step": 39
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "grad_norm": 0.111328125,
+ "learning_rate": 0.00016300878435817113,
+ "loss": 1.7162,
+ "step": 40
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "eval_loss": 1.757140040397644,
+ "eval_runtime": 19.6485,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.356,
+ "step": 40
+ },
+ {
+ "epoch": 1.3781512605042017,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00016068001458185936,
+ "loss": 1.6501,
+ "step": 41
+ },
+ {
+ "epoch": 1.4117647058823528,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001582979479114472,
+ "loss": 1.6446,
+ "step": 42
+ },
+ {
+ "epoch": 1.4453781512605042,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00015586467658036524,
+ "loss": 1.7104,
+ "step": 43
+ },
+ {
+ "epoch": 1.4789915966386555,
+ "grad_norm": 0.109375,
+ "learning_rate": 0.0001533823377964791,
+ "loss": 1.6146,
+ "step": 44
+ },
+ {
+ "epoch": 1.5126050420168067,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00015085311186492206,
+ "loss": 1.6448,
+ "step": 45
+ },
+ {
+ "epoch": 1.5462184873949578,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00014827922027307451,
+ "loss": 1.6735,
+ "step": 46
+ },
+ {
+ "epoch": 1.5798319327731094,
+ "grad_norm": 0.1181640625,
+ "learning_rate": 0.0001456629237393713,
+ "loss": 1.6604,
+ "step": 47
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00014300652022765207,
+ "loss": 1.7046,
+ "step": 48
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "eval_loss": 1.7558497190475464,
+ "eval_runtime": 19.7723,
+ "eval_samples_per_second": 2.529,
+ "eval_steps_per_second": 0.354,
+ "step": 48
+ },
+ {
+ "epoch": 1.6470588235294117,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00014031234292879725,
+ "loss": 1.694,
+ "step": 49
+ },
+ {
+ "epoch": 1.680672268907563,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00013758275821142382,
+ "loss": 1.625,
+ "step": 50
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.0001348201635434399,
+ "loss": 1.6919,
+ "step": 51
+ },
+ {
+ "epoch": 1.7478991596638656,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.00013202698538628376,
+ "loss": 1.6777,
+ "step": 52
+ },
+ {
+ "epoch": 1.7815126050420167,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00012920567706369758,
+ "loss": 1.7762,
+ "step": 53
+ },
+ {
+ "epoch": 1.815126050420168,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00012635871660690676,
+ "loss": 1.6668,
+ "step": 54
+ },
+ {
+ "epoch": 1.8487394957983194,
+ "grad_norm": 0.125,
+ "learning_rate": 0.00012348860457809838,
+ "loss": 1.7061,
+ "step": 55
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00012059786187410984,
+ "loss": 1.6714,
+ "step": 56
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "eval_loss": 1.7563551664352417,
+ "eval_runtime": 19.6588,
+ "eval_samples_per_second": 2.543,
+ "eval_steps_per_second": 0.356,
+ "step": 56
+ },
+ {
+ "epoch": 1.9159663865546217,
+ "grad_norm": 0.1279296875,
+ "learning_rate": 0.0001176890275122573,
+ "loss": 1.6318,
+ "step": 57
+ },
+ {
+ "epoch": 1.949579831932773,
+ "grad_norm": 0.1318359375,
+ "learning_rate": 0.00011476465640024814,
+ "loss": 1.6693,
+ "step": 58
+ },
+ {
+ "epoch": 1.9831932773109244,
+ "grad_norm": 0.12353515625,
+ "learning_rate": 0.00011182731709213659,
+ "loss": 1.5927,
+ "step": 59
+ },
+ {
+ "epoch": 2.0168067226890756,
+ "grad_norm": 0.138671875,
+ "learning_rate": 0.00010887958953229349,
+ "loss": 1.6558,
+ "step": 60
+ },
+ {
+ "epoch": 2.0504201680672267,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.00010592406278937144,
+ "loss": 1.7352,
+ "step": 61
+ },
+ {
+ "epoch": 2.0840336134453783,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00010296333278225599,
+ "loss": 1.6216,
+ "step": 62
+ },
+ {
+ "epoch": 2.1176470588235294,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.0001,
+ "loss": 1.6365,
+ "step": 63
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 9.703666721774402e-05,
+ "loss": 1.6249,
+ "step": 64
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "eval_loss": 1.756639838218689,
+ "eval_runtime": 19.5951,
+ "eval_samples_per_second": 2.552,
+ "eval_steps_per_second": 0.357,
+ "step": 64
+ },
+ {
+ "epoch": 2.184873949579832,
+ "grad_norm": 0.1357421875,
+ "learning_rate": 9.407593721062859e-05,
+ "loss": 1.653,
+ "step": 65
+ },
+ {
+ "epoch": 2.2184873949579833,
+ "grad_norm": 0.146484375,
+ "learning_rate": 9.112041046770653e-05,
+ "loss": 1.6545,
+ "step": 66
+ },
+ {
+ "epoch": 2.2521008403361344,
+ "grad_norm": 0.13671875,
+ "learning_rate": 8.817268290786343e-05,
+ "loss": 1.5787,
+ "step": 67
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 0.1328125,
+ "learning_rate": 8.523534359975189e-05,
+ "loss": 1.6532,
+ "step": 68
+ },
+ {
+ "epoch": 2.3193277310924367,
+ "grad_norm": 0.1396484375,
+ "learning_rate": 8.231097248774274e-05,
+ "loss": 1.6784,
+ "step": 69
+ },
+ {
+ "epoch": 2.3529411764705883,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 7.940213812589018e-05,
+ "loss": 1.5721,
+ "step": 70
+ },
+ {
+ "epoch": 2.3865546218487395,
+ "grad_norm": 0.1416015625,
+ "learning_rate": 7.651139542190164e-05,
+ "loss": 1.5836,
+ "step": 71
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "grad_norm": 0.146484375,
+ "learning_rate": 7.364128339309326e-05,
+ "loss": 1.5604,
+ "step": 72
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "eval_loss": 1.7598735094070435,
+ "eval_runtime": 19.7508,
+ "eval_samples_per_second": 2.532,
+ "eval_steps_per_second": 0.354,
+ "step": 72
+ },
+ {
+ "epoch": 2.453781512605042,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 7.079432293630244e-05,
+ "loss": 1.6259,
+ "step": 73
+ },
+ {
+ "epoch": 2.4873949579831933,
+ "grad_norm": 0.1484375,
+ "learning_rate": 6.797301461371625e-05,
+ "loss": 1.5811,
+ "step": 74
+ },
+ {
+ "epoch": 2.5210084033613445,
+ "grad_norm": 0.14453125,
+ "learning_rate": 6.517983645656014e-05,
+ "loss": 1.4929,
+ "step": 75
+ },
+ {
+ "epoch": 2.5546218487394956,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 6.24172417885762e-05,
+ "loss": 1.7014,
+ "step": 76
+ },
+ {
+ "epoch": 2.588235294117647,
+ "grad_norm": 0.1484375,
+ "learning_rate": 5.96876570712028e-05,
+ "loss": 1.5623,
+ "step": 77
+ },
+ {
+ "epoch": 2.6218487394957983,
+ "grad_norm": 0.1474609375,
+ "learning_rate": 5.699347977234799e-05,
+ "loss": 1.6006,
+ "step": 78
+ },
+ {
+ "epoch": 2.6554621848739495,
+ "grad_norm": 0.150390625,
+ "learning_rate": 5.43370762606287e-05,
+ "loss": 1.6641,
+ "step": 79
+ },
+ {
+ "epoch": 2.689075630252101,
+ "grad_norm": 0.15234375,
+ "learning_rate": 5.172077972692553e-05,
+ "loss": 1.7003,
+ "step": 80
+ },
+ {
+ "epoch": 2.689075630252101,
+ "eval_loss": 1.761399269104004,
+ "eval_runtime": 19.6692,
+ "eval_samples_per_second": 2.542,
+ "eval_steps_per_second": 0.356,
+ "step": 80
+ },
+ {
+ "epoch": 2.722689075630252,
+ "grad_norm": 0.154296875,
+ "learning_rate": 4.914688813507797e-05,
+ "loss": 1.6923,
+ "step": 81
+ },
+ {
+ "epoch": 2.7563025210084033,
+ "grad_norm": 0.158203125,
+ "learning_rate": 4.661766220352097e-05,
+ "loss": 1.6819,
+ "step": 82
+ },
+ {
+ "epoch": 2.7899159663865545,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 4.4135323419634766e-05,
+ "loss": 1.5649,
+ "step": 83
+ },
+ {
+ "epoch": 2.8235294117647056,
+ "grad_norm": 0.1494140625,
+ "learning_rate": 4.170205208855281e-05,
+ "loss": 1.608,
+ "step": 84
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 0.1611328125,
+ "learning_rate": 3.931998541814069e-05,
+ "loss": 1.5474,
+ "step": 85
+ },
+ {
+ "epoch": 2.8907563025210083,
+ "grad_norm": 0.1552734375,
+ "learning_rate": 3.69912156418289e-05,
+ "loss": 1.6484,
+ "step": 86
+ },
+ {
+ "epoch": 2.92436974789916,
+ "grad_norm": 0.1484375,
+ "learning_rate": 3.471778818094785e-05,
+ "loss": 1.6145,
+ "step": 87
+ },
+ {
+ "epoch": 2.957983193277311,
+ "grad_norm": 0.158203125,
+ "learning_rate": 3.250169984817897e-05,
+ "loss": 1.7115,
+ "step": 88
+ },
+ {
+ "epoch": 2.957983193277311,
+ "eval_loss": 1.7605273723602295,
+ "eval_runtime": 19.7632,
+ "eval_samples_per_second": 2.53,
+ "eval_steps_per_second": 0.354,
+ "step": 88
+ },
+ {
+ "epoch": 2.991596638655462,
+ "grad_norm": 0.1591796875,
+ "learning_rate": 3.034489709370033e-05,
+ "loss": 1.6485,
+ "step": 89
+ },
+ {
+ "epoch": 3.0252100840336134,
+ "grad_norm": 0.16015625,
+ "learning_rate": 2.8249274295566864e-05,
+ "loss": 1.5097,
+ "step": 90
+ },
+ {
+ "epoch": 3.0588235294117645,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 2.6216672095827266e-05,
+ "loss": 1.5918,
+ "step": 91
+ },
+ {
+ "epoch": 3.092436974789916,
+ "grad_norm": 0.1484375,
+ "learning_rate": 2.4248875783837987e-05,
+ "loss": 1.6116,
+ "step": 92
+ },
+ {
+ "epoch": 3.1260504201680672,
+ "grad_norm": 0.1552734375,
+ "learning_rate": 2.234761372819577e-05,
+ "loss": 1.5989,
+ "step": 93
+ },
+ {
+ "epoch": 3.1596638655462184,
+ "grad_norm": 0.154296875,
+ "learning_rate": 2.0514555858664663e-05,
+ "loss": 1.6061,
+ "step": 94
+ },
+ {
+ "epoch": 3.19327731092437,
+ "grad_norm": 0.16015625,
+ "learning_rate": 1.875131219943187e-05,
+ "loss": 1.549,
+ "step": 95
+ },
+ {
+ "epoch": 3.226890756302521,
+ "grad_norm": 0.15625,
+ "learning_rate": 1.7059431454979824e-05,
+ "loss": 1.5937,
+ "step": 96
+ },
+ {
+ "epoch": 3.226890756302521,
+ "eval_loss": 1.7609126567840576,
+ "eval_runtime": 19.7273,
+ "eval_samples_per_second": 2.535,
+ "eval_steps_per_second": 0.355,
+ "step": 96
+ },
+ {
+ "epoch": 3.2605042016806722,
+ "grad_norm": 0.1611328125,
+ "learning_rate": 1.5440399649817385e-05,
+ "loss": 1.6636,
+ "step": 97
+ },
+ {
+ "epoch": 3.2941176470588234,
+ "grad_norm": 0.15625,
+ "learning_rate": 1.3895638823264446e-05,
+ "loss": 1.6452,
+ "step": 98
+ },
+ {
+ "epoch": 3.327731092436975,
+ "grad_norm": 0.146484375,
+ "learning_rate": 1.2426505780436326e-05,
+ "loss": 1.6122,
+ "step": 99
+ },
+ {
+ "epoch": 3.361344537815126,
+ "grad_norm": 0.154296875,
+ "learning_rate": 1.103429090052528e-05,
+ "loss": 1.7153,
+ "step": 100
+ },
+ {
+ "epoch": 3.3949579831932772,
+ "grad_norm": 0.154296875,
+ "learning_rate": 9.720217003425647e-06,
+ "loss": 1.5094,
+ "step": 101
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 0.146484375,
+ "learning_rate": 8.485438275698154e-06,
+ "loss": 1.5703,
+ "step": 102
+ },
+ {
+ "epoch": 3.46218487394958,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 7.331039256816663e-06,
+ "loss": 1.5971,
+ "step": 103
+ },
+ {
+ "epoch": 3.495798319327731,
+ "grad_norm": 0.158203125,
+ "learning_rate": 6.258033886587911e-06,
+ "loss": 1.655,
+ "step": 104
+ },
+ {
+ "epoch": 3.495798319327731,
+ "eval_loss": 1.7612451314926147,
+ "eval_runtime": 19.7385,
+ "eval_samples_per_second": 2.533,
+ "eval_steps_per_second": 0.355,
+ "step": 104
+ },
+ {
+ "epoch": 3.5294117647058822,
+ "grad_norm": 0.158203125,
+ "learning_rate": 5.267364614580861e-06,
+ "loss": 1.6928,
+ "step": 105
+ },
+ {
+ "epoch": 3.5630252100840334,
+ "grad_norm": 0.1474609375,
+ "learning_rate": 4.359901572347758e-06,
+ "loss": 1.6083,
+ "step": 106
+ },
+ {
+ "epoch": 3.596638655462185,
+ "grad_norm": 0.1591796875,
+ "learning_rate": 3.5364418091641373e-06,
+ "loss": 1.6547,
+ "step": 107
+ },
+ {
+ "epoch": 3.630252100840336,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 2.7977085919589254e-06,
+ "loss": 1.5425,
+ "step": 108
+ },
+ {
+ "epoch": 3.6638655462184873,
+ "grad_norm": 0.154296875,
+ "learning_rate": 2.144350770049597e-06,
+ "loss": 1.5763,
+ "step": 109
+ },
+ {
+ "epoch": 3.697478991596639,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 1.576942205240317e-06,
+ "loss": 1.513,
+ "step": 110
+ },
+ {
+ "epoch": 3.73109243697479,
+ "grad_norm": 0.15234375,
+ "learning_rate": 1.0959812677835968e-06,
+ "loss": 1.5338,
+ "step": 111
+ },
+ {
+ "epoch": 3.764705882352941,
+ "grad_norm": 0.150390625,
+ "learning_rate": 7.018903986483083e-07,
+ "loss": 1.5829,
+ "step": 112
+ },
+ {
+ "epoch": 3.764705882352941,
+ "eval_loss": 1.7612619400024414,
+ "eval_runtime": 19.83,
+ "eval_samples_per_second": 2.521,
+ "eval_steps_per_second": 0.353,
+ "step": 112
+ },
+ {
+ "epoch": 3.7983193277310923,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 3.950157384783104e-07,
+ "loss": 1.5591,
+ "step": 113
+ },
+ {
+ "epoch": 3.831932773109244,
+ "grad_norm": 0.1533203125,
+ "learning_rate": 1.7562682356786487e-07,
+ "loss": 1.6213,
+ "step": 114
+ },
+ {
+ "epoch": 3.865546218487395,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 4.391634912056519e-08,
+ "loss": 1.6246,
+ "step": 115
+ },
+ {
+ "epoch": 3.899159663865546,
+ "grad_norm": 0.1591796875,
+ "learning_rate": 0.0,
+ "loss": 1.5533,
+ "step": 116
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.3243100413820928e+16,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-116/training_args.bin b/checkpoint-116/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/checkpoint-116/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/checkpoint-29/README.md b/checkpoint-29/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/checkpoint-29/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/checkpoint-29/adapter_config.json b/checkpoint-29/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/checkpoint-29/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-29/adapter_model.safetensors b/checkpoint-29/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5e0844b9cf95a1f91107276689c6a204f10d1830
--- /dev/null
+++ b/checkpoint-29/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f3306c7c3271b6df472652a3e758371296cdd9260f28eae96c6d5c93e8f261b
+size 50503848
diff --git a/checkpoint-29/optimizer.pt b/checkpoint-29/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a18c2f4e2286fb9f8b4470cf86d94bbfe6610def
--- /dev/null
+++ b/checkpoint-29/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60450a89e284ab0412512635b6713a0507d3a55dc20cb18cd142230fe92e18c8
+size 202035450
diff --git a/checkpoint-29/rng_state.pth b/checkpoint-29/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b6fa1b1b3c6ac0284a020fecd5590ae0ab72dea9
--- /dev/null
+++ b/checkpoint-29/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5dff5ca7ee6d9737c0fb532125f4108aa3bd942be0c6c415c0eee299436cfee
+size 14244
diff --git a/checkpoint-29/scheduler.pt b/checkpoint-29/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f063c1a6852ef8c3a95f27c4a7fddee4c12090d
--- /dev/null
+++ b/checkpoint-29/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82e0ea69a0f2d46a8611802e20de4ba9ab4c81307121d85f58745fed7e6bfae6
+size 1064
diff --git a/checkpoint-29/special_tokens_map.json b/checkpoint-29/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-29/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-29/tokenizer.model b/checkpoint-29/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-29/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-29/tokenizer_config.json b/checkpoint-29/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/checkpoint-29/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-29/trainer_state.json b/checkpoint-29/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..dd095afd7011bcf0f72a191bef8f28dee83a3a87
--- /dev/null
+++ b/checkpoint-29/trainer_state.json
@@ -0,0 +1,268 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9747899159663865,
+ "eval_steps": 8,
+ "global_step": 29,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3366220896141312.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-29/training_args.bin b/checkpoint-29/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/checkpoint-29/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/checkpoint-58/README.md b/checkpoint-58/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/checkpoint-58/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/checkpoint-58/adapter_config.json b/checkpoint-58/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/checkpoint-58/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-58/adapter_model.safetensors b/checkpoint-58/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bfd700946ae485ed0a53abce0fa0d65c511cb467
--- /dev/null
+++ b/checkpoint-58/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f07ef1ad6cfdd6061f1cad1529524d822d6c21409b40d6ca2c11e7a2ebd9b03f
+size 50503848
diff --git a/checkpoint-58/optimizer.pt b/checkpoint-58/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c700fe51d3f01608933be8ff25ecc184b8829f27
--- /dev/null
+++ b/checkpoint-58/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:712ca45af3a346ebbc26929883fe200d0d4fa0625f5f02944a41b2721a0dac4f
+size 202035450
diff --git a/checkpoint-58/rng_state.pth b/checkpoint-58/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..709982b4f1bb181ce340f82c833859051709cf88
--- /dev/null
+++ b/checkpoint-58/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e2714773be96a258d965b3d5be5d9e51b91e01c435077d016f8a6cad6f5455b
+size 14244
diff --git a/checkpoint-58/scheduler.pt b/checkpoint-58/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e377bc38d38b096d2bcf506ad1e35afcd94de1c
--- /dev/null
+++ b/checkpoint-58/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ffd843f86241f649ae5523a8aa7c9d13157ef6bc9ba4fe819ec88f7f7923587
+size 1064
diff --git a/checkpoint-58/special_tokens_map.json b/checkpoint-58/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-58/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-58/tokenizer.model b/checkpoint-58/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-58/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-58/tokenizer_config.json b/checkpoint-58/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/checkpoint-58/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-58/trainer_state.json b/checkpoint-58/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..d3e2499c7d8b3c7b77bc47e28147ef2252b36342
--- /dev/null
+++ b/checkpoint-58/trainer_state.json
@@ -0,0 +1,503 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.949579831932773,
+ "eval_steps": 8,
+ "global_step": 58,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ },
+ {
+ "epoch": 1.0084033613445378,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001829405685450202,
+ "loss": 1.7947,
+ "step": 30
+ },
+ {
+ "epoch": 1.0420168067226891,
+ "grad_norm": 0.20703125,
+ "learning_rate": 0.00018124868780056814,
+ "loss": 1.6887,
+ "step": 31
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 0.00017948544414133534,
+ "loss": 1.6722,
+ "step": 32
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "eval_loss": 1.7600828409194946,
+ "eval_runtime": 19.7105,
+ "eval_samples_per_second": 2.537,
+ "eval_steps_per_second": 0.355,
+ "step": 32
+ },
+ {
+ "epoch": 1.1092436974789917,
+ "grad_norm": 0.09814453125,
+ "learning_rate": 0.00017765238627180424,
+ "loss": 1.7145,
+ "step": 33
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00017575112421616202,
+ "loss": 1.6609,
+ "step": 34
+ },
+ {
+ "epoch": 1.1764705882352942,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00017378332790417273,
+ "loss": 1.6681,
+ "step": 35
+ },
+ {
+ "epoch": 1.2100840336134453,
+ "grad_norm": 0.11767578125,
+ "learning_rate": 0.00017175072570443312,
+ "loss": 1.6641,
+ "step": 36
+ },
+ {
+ "epoch": 1.2436974789915967,
+ "grad_norm": 0.11376953125,
+ "learning_rate": 0.00016965510290629972,
+ "loss": 1.7011,
+ "step": 37
+ },
+ {
+ "epoch": 1.2773109243697478,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00016749830015182107,
+ "loss": 1.7171,
+ "step": 38
+ },
+ {
+ "epoch": 1.3109243697478992,
+ "grad_norm": 0.103515625,
+ "learning_rate": 0.00016528221181905217,
+ "loss": 1.6333,
+ "step": 39
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "grad_norm": 0.111328125,
+ "learning_rate": 0.00016300878435817113,
+ "loss": 1.7162,
+ "step": 40
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "eval_loss": 1.757140040397644,
+ "eval_runtime": 19.6485,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.356,
+ "step": 40
+ },
+ {
+ "epoch": 1.3781512605042017,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00016068001458185936,
+ "loss": 1.6501,
+ "step": 41
+ },
+ {
+ "epoch": 1.4117647058823528,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001582979479114472,
+ "loss": 1.6446,
+ "step": 42
+ },
+ {
+ "epoch": 1.4453781512605042,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00015586467658036524,
+ "loss": 1.7104,
+ "step": 43
+ },
+ {
+ "epoch": 1.4789915966386555,
+ "grad_norm": 0.109375,
+ "learning_rate": 0.0001533823377964791,
+ "loss": 1.6146,
+ "step": 44
+ },
+ {
+ "epoch": 1.5126050420168067,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00015085311186492206,
+ "loss": 1.6448,
+ "step": 45
+ },
+ {
+ "epoch": 1.5462184873949578,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00014827922027307451,
+ "loss": 1.6735,
+ "step": 46
+ },
+ {
+ "epoch": 1.5798319327731094,
+ "grad_norm": 0.1181640625,
+ "learning_rate": 0.0001456629237393713,
+ "loss": 1.6604,
+ "step": 47
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00014300652022765207,
+ "loss": 1.7046,
+ "step": 48
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "eval_loss": 1.7558497190475464,
+ "eval_runtime": 19.7723,
+ "eval_samples_per_second": 2.529,
+ "eval_steps_per_second": 0.354,
+ "step": 48
+ },
+ {
+ "epoch": 1.6470588235294117,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00014031234292879725,
+ "loss": 1.694,
+ "step": 49
+ },
+ {
+ "epoch": 1.680672268907563,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00013758275821142382,
+ "loss": 1.625,
+ "step": 50
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.0001348201635434399,
+ "loss": 1.6919,
+ "step": 51
+ },
+ {
+ "epoch": 1.7478991596638656,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.00013202698538628376,
+ "loss": 1.6777,
+ "step": 52
+ },
+ {
+ "epoch": 1.7815126050420167,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00012920567706369758,
+ "loss": 1.7762,
+ "step": 53
+ },
+ {
+ "epoch": 1.815126050420168,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00012635871660690676,
+ "loss": 1.6668,
+ "step": 54
+ },
+ {
+ "epoch": 1.8487394957983194,
+ "grad_norm": 0.125,
+ "learning_rate": 0.00012348860457809838,
+ "loss": 1.7061,
+ "step": 55
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00012059786187410984,
+ "loss": 1.6714,
+ "step": 56
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "eval_loss": 1.7563551664352417,
+ "eval_runtime": 19.6588,
+ "eval_samples_per_second": 2.543,
+ "eval_steps_per_second": 0.356,
+ "step": 56
+ },
+ {
+ "epoch": 1.9159663865546217,
+ "grad_norm": 0.1279296875,
+ "learning_rate": 0.0001176890275122573,
+ "loss": 1.6318,
+ "step": 57
+ },
+ {
+ "epoch": 1.949579831932773,
+ "grad_norm": 0.1318359375,
+ "learning_rate": 0.00011476465640024814,
+ "loss": 1.6693,
+ "step": 58
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6643321582387200.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-58/training_args.bin b/checkpoint-58/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/checkpoint-58/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/checkpoint-87/README.md b/checkpoint-87/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/checkpoint-87/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/checkpoint-87/adapter_config.json b/checkpoint-87/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/checkpoint-87/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/checkpoint-87/adapter_model.safetensors b/checkpoint-87/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c1d20d28ff81e0ed0999329ceb1ef1f8b07218e3
--- /dev/null
+++ b/checkpoint-87/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c119a868a2f168125d890295ff7e7d1e8709c364ae22393fd56670ef283bd84
+size 50503848
diff --git a/checkpoint-87/optimizer.pt b/checkpoint-87/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..585f4ca53e1081a671ad6811e9c49c8f0b15b380
--- /dev/null
+++ b/checkpoint-87/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be530a8fa8568da1149a6eb3fa1e0d3e6641ab7b50fc6a724480c58020d8db51
+size 202035450
diff --git a/checkpoint-87/rng_state.pth b/checkpoint-87/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b879ea1ffc28828c28e813f2f6c0593a8ac8796a
--- /dev/null
+++ b/checkpoint-87/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abee6b45a92cc12ab71fa5ef10750e86110e096fada3acf2955d86fe41f121d3
+size 14244
diff --git a/checkpoint-87/scheduler.pt b/checkpoint-87/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..769177b071d8fbc5d172d02f4a8b213b334b7c86
--- /dev/null
+++ b/checkpoint-87/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96dfdc4ac2eb14a7a5283672324c02dd21283974614857cbf29d0d55e24e2b3f
+size 1064
diff --git a/checkpoint-87/special_tokens_map.json b/checkpoint-87/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/checkpoint-87/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-87/tokenizer.model b/checkpoint-87/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/checkpoint-87/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/checkpoint-87/tokenizer_config.json b/checkpoint-87/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/checkpoint-87/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/checkpoint-87/trainer_state.json b/checkpoint-87/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..82ed7f3515632072d49c2daaead6ee981950aea0
--- /dev/null
+++ b/checkpoint-87/trainer_state.json
@@ -0,0 +1,730 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.92436974789916,
+ "eval_steps": 8,
+ "global_step": 87,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ },
+ {
+ "epoch": 1.0084033613445378,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001829405685450202,
+ "loss": 1.7947,
+ "step": 30
+ },
+ {
+ "epoch": 1.0420168067226891,
+ "grad_norm": 0.20703125,
+ "learning_rate": 0.00018124868780056814,
+ "loss": 1.6887,
+ "step": 31
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 0.00017948544414133534,
+ "loss": 1.6722,
+ "step": 32
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "eval_loss": 1.7600828409194946,
+ "eval_runtime": 19.7105,
+ "eval_samples_per_second": 2.537,
+ "eval_steps_per_second": 0.355,
+ "step": 32
+ },
+ {
+ "epoch": 1.1092436974789917,
+ "grad_norm": 0.09814453125,
+ "learning_rate": 0.00017765238627180424,
+ "loss": 1.7145,
+ "step": 33
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00017575112421616202,
+ "loss": 1.6609,
+ "step": 34
+ },
+ {
+ "epoch": 1.1764705882352942,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00017378332790417273,
+ "loss": 1.6681,
+ "step": 35
+ },
+ {
+ "epoch": 1.2100840336134453,
+ "grad_norm": 0.11767578125,
+ "learning_rate": 0.00017175072570443312,
+ "loss": 1.6641,
+ "step": 36
+ },
+ {
+ "epoch": 1.2436974789915967,
+ "grad_norm": 0.11376953125,
+ "learning_rate": 0.00016965510290629972,
+ "loss": 1.7011,
+ "step": 37
+ },
+ {
+ "epoch": 1.2773109243697478,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00016749830015182107,
+ "loss": 1.7171,
+ "step": 38
+ },
+ {
+ "epoch": 1.3109243697478992,
+ "grad_norm": 0.103515625,
+ "learning_rate": 0.00016528221181905217,
+ "loss": 1.6333,
+ "step": 39
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "grad_norm": 0.111328125,
+ "learning_rate": 0.00016300878435817113,
+ "loss": 1.7162,
+ "step": 40
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "eval_loss": 1.757140040397644,
+ "eval_runtime": 19.6485,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.356,
+ "step": 40
+ },
+ {
+ "epoch": 1.3781512605042017,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00016068001458185936,
+ "loss": 1.6501,
+ "step": 41
+ },
+ {
+ "epoch": 1.4117647058823528,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001582979479114472,
+ "loss": 1.6446,
+ "step": 42
+ },
+ {
+ "epoch": 1.4453781512605042,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00015586467658036524,
+ "loss": 1.7104,
+ "step": 43
+ },
+ {
+ "epoch": 1.4789915966386555,
+ "grad_norm": 0.109375,
+ "learning_rate": 0.0001533823377964791,
+ "loss": 1.6146,
+ "step": 44
+ },
+ {
+ "epoch": 1.5126050420168067,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00015085311186492206,
+ "loss": 1.6448,
+ "step": 45
+ },
+ {
+ "epoch": 1.5462184873949578,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00014827922027307451,
+ "loss": 1.6735,
+ "step": 46
+ },
+ {
+ "epoch": 1.5798319327731094,
+ "grad_norm": 0.1181640625,
+ "learning_rate": 0.0001456629237393713,
+ "loss": 1.6604,
+ "step": 47
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00014300652022765207,
+ "loss": 1.7046,
+ "step": 48
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "eval_loss": 1.7558497190475464,
+ "eval_runtime": 19.7723,
+ "eval_samples_per_second": 2.529,
+ "eval_steps_per_second": 0.354,
+ "step": 48
+ },
+ {
+ "epoch": 1.6470588235294117,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00014031234292879725,
+ "loss": 1.694,
+ "step": 49
+ },
+ {
+ "epoch": 1.680672268907563,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00013758275821142382,
+ "loss": 1.625,
+ "step": 50
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.0001348201635434399,
+ "loss": 1.6919,
+ "step": 51
+ },
+ {
+ "epoch": 1.7478991596638656,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.00013202698538628376,
+ "loss": 1.6777,
+ "step": 52
+ },
+ {
+ "epoch": 1.7815126050420167,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00012920567706369758,
+ "loss": 1.7762,
+ "step": 53
+ },
+ {
+ "epoch": 1.815126050420168,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00012635871660690676,
+ "loss": 1.6668,
+ "step": 54
+ },
+ {
+ "epoch": 1.8487394957983194,
+ "grad_norm": 0.125,
+ "learning_rate": 0.00012348860457809838,
+ "loss": 1.7061,
+ "step": 55
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00012059786187410984,
+ "loss": 1.6714,
+ "step": 56
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "eval_loss": 1.7563551664352417,
+ "eval_runtime": 19.6588,
+ "eval_samples_per_second": 2.543,
+ "eval_steps_per_second": 0.356,
+ "step": 56
+ },
+ {
+ "epoch": 1.9159663865546217,
+ "grad_norm": 0.1279296875,
+ "learning_rate": 0.0001176890275122573,
+ "loss": 1.6318,
+ "step": 57
+ },
+ {
+ "epoch": 1.949579831932773,
+ "grad_norm": 0.1318359375,
+ "learning_rate": 0.00011476465640024814,
+ "loss": 1.6693,
+ "step": 58
+ },
+ {
+ "epoch": 1.9831932773109244,
+ "grad_norm": 0.12353515625,
+ "learning_rate": 0.00011182731709213659,
+ "loss": 1.5927,
+ "step": 59
+ },
+ {
+ "epoch": 2.0168067226890756,
+ "grad_norm": 0.138671875,
+ "learning_rate": 0.00010887958953229349,
+ "loss": 1.6558,
+ "step": 60
+ },
+ {
+ "epoch": 2.0504201680672267,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.00010592406278937144,
+ "loss": 1.7352,
+ "step": 61
+ },
+ {
+ "epoch": 2.0840336134453783,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00010296333278225599,
+ "loss": 1.6216,
+ "step": 62
+ },
+ {
+ "epoch": 2.1176470588235294,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.0001,
+ "loss": 1.6365,
+ "step": 63
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 9.703666721774402e-05,
+ "loss": 1.6249,
+ "step": 64
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "eval_loss": 1.756639838218689,
+ "eval_runtime": 19.5951,
+ "eval_samples_per_second": 2.552,
+ "eval_steps_per_second": 0.357,
+ "step": 64
+ },
+ {
+ "epoch": 2.184873949579832,
+ "grad_norm": 0.1357421875,
+ "learning_rate": 9.407593721062859e-05,
+ "loss": 1.653,
+ "step": 65
+ },
+ {
+ "epoch": 2.2184873949579833,
+ "grad_norm": 0.146484375,
+ "learning_rate": 9.112041046770653e-05,
+ "loss": 1.6545,
+ "step": 66
+ },
+ {
+ "epoch": 2.2521008403361344,
+ "grad_norm": 0.13671875,
+ "learning_rate": 8.817268290786343e-05,
+ "loss": 1.5787,
+ "step": 67
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 0.1328125,
+ "learning_rate": 8.523534359975189e-05,
+ "loss": 1.6532,
+ "step": 68
+ },
+ {
+ "epoch": 2.3193277310924367,
+ "grad_norm": 0.1396484375,
+ "learning_rate": 8.231097248774274e-05,
+ "loss": 1.6784,
+ "step": 69
+ },
+ {
+ "epoch": 2.3529411764705883,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 7.940213812589018e-05,
+ "loss": 1.5721,
+ "step": 70
+ },
+ {
+ "epoch": 2.3865546218487395,
+ "grad_norm": 0.1416015625,
+ "learning_rate": 7.651139542190164e-05,
+ "loss": 1.5836,
+ "step": 71
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "grad_norm": 0.146484375,
+ "learning_rate": 7.364128339309326e-05,
+ "loss": 1.5604,
+ "step": 72
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "eval_loss": 1.7598735094070435,
+ "eval_runtime": 19.7508,
+ "eval_samples_per_second": 2.532,
+ "eval_steps_per_second": 0.354,
+ "step": 72
+ },
+ {
+ "epoch": 2.453781512605042,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 7.079432293630244e-05,
+ "loss": 1.6259,
+ "step": 73
+ },
+ {
+ "epoch": 2.4873949579831933,
+ "grad_norm": 0.1484375,
+ "learning_rate": 6.797301461371625e-05,
+ "loss": 1.5811,
+ "step": 74
+ },
+ {
+ "epoch": 2.5210084033613445,
+ "grad_norm": 0.14453125,
+ "learning_rate": 6.517983645656014e-05,
+ "loss": 1.4929,
+ "step": 75
+ },
+ {
+ "epoch": 2.5546218487394956,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 6.24172417885762e-05,
+ "loss": 1.7014,
+ "step": 76
+ },
+ {
+ "epoch": 2.588235294117647,
+ "grad_norm": 0.1484375,
+ "learning_rate": 5.96876570712028e-05,
+ "loss": 1.5623,
+ "step": 77
+ },
+ {
+ "epoch": 2.6218487394957983,
+ "grad_norm": 0.1474609375,
+ "learning_rate": 5.699347977234799e-05,
+ "loss": 1.6006,
+ "step": 78
+ },
+ {
+ "epoch": 2.6554621848739495,
+ "grad_norm": 0.150390625,
+ "learning_rate": 5.43370762606287e-05,
+ "loss": 1.6641,
+ "step": 79
+ },
+ {
+ "epoch": 2.689075630252101,
+ "grad_norm": 0.15234375,
+ "learning_rate": 5.172077972692553e-05,
+ "loss": 1.7003,
+ "step": 80
+ },
+ {
+ "epoch": 2.689075630252101,
+ "eval_loss": 1.761399269104004,
+ "eval_runtime": 19.6692,
+ "eval_samples_per_second": 2.542,
+ "eval_steps_per_second": 0.356,
+ "step": 80
+ },
+ {
+ "epoch": 2.722689075630252,
+ "grad_norm": 0.154296875,
+ "learning_rate": 4.914688813507797e-05,
+ "loss": 1.6923,
+ "step": 81
+ },
+ {
+ "epoch": 2.7563025210084033,
+ "grad_norm": 0.158203125,
+ "learning_rate": 4.661766220352097e-05,
+ "loss": 1.6819,
+ "step": 82
+ },
+ {
+ "epoch": 2.7899159663865545,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 4.4135323419634766e-05,
+ "loss": 1.5649,
+ "step": 83
+ },
+ {
+ "epoch": 2.8235294117647056,
+ "grad_norm": 0.1494140625,
+ "learning_rate": 4.170205208855281e-05,
+ "loss": 1.608,
+ "step": 84
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 0.1611328125,
+ "learning_rate": 3.931998541814069e-05,
+ "loss": 1.5474,
+ "step": 85
+ },
+ {
+ "epoch": 2.8907563025210083,
+ "grad_norm": 0.1552734375,
+ "learning_rate": 3.69912156418289e-05,
+ "loss": 1.6484,
+ "step": 86
+ },
+ {
+ "epoch": 2.92436974789916,
+ "grad_norm": 0.1484375,
+ "learning_rate": 3.471778818094785e-05,
+ "loss": 1.6145,
+ "step": 87
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 9933851341357056.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-87/training_args.bin b/checkpoint-87/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/checkpoint-87/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4fd46aded17c1e2a61e180b1bb90c4097c007cd9
--- /dev/null
+++ b/config.json
@@ -0,0 +1,44 @@
+{
+ "_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5632,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 22,
+ "num_key_value_heads": 4,
+ "pretraining_tp": 1,
+ "quantization_config": {
+ "_load_in_4bit": true,
+ "_load_in_8bit": false,
+ "bnb_4bit_compute_dtype": "float32",
+ "bnb_4bit_quant_storage": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "llm_int8_enable_fp32_cpu_offload": false,
+ "llm_int8_has_fp16_weight": false,
+ "llm_int8_skip_modules": null,
+ "llm_int8_threshold": 6.0,
+ "load_in_4bit": true,
+ "load_in_8bit": false,
+ "quant_method": "bitsandbytes"
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.1",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/content/outputs/qlora-out/README.md b/content/outputs/qlora-out/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a7873f84dfbc90de6f6ce83f3b9fc02e750f046
--- /dev/null
+++ b/content/outputs/qlora-out/README.md
@@ -0,0 +1,143 @@
+---
+license: apache-2.0
+library_name: peft
+tags:
+- generated_from_trainer
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+model-index:
+- name: outputs/qlora-out
+ results: []
+---
+
+
+
+[Built with Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)
+See axolotl config
+
+axolotl version: `0.4.1`
+```yaml
+adapter: qlora
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+bf16: false
+dataset_prepared_path: null
+datasets:
+- ds_type: json
+ path: pubmed_continual_pretraning_dataset.jsonl
+ type: completion
+debug: null
+deepspeed: null
+early_stopping_patience: null
+eval_sample_packing: false
+evals_per_epoch: 4
+flash_attention: false
+fp16: null
+fsdp: null
+fsdp_config: null
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+group_by_length: false
+learning_rate: 0.0002
+load_in_4bit: true
+load_in_8bit: false
+local_rank: null
+logging_steps: 1
+lora_alpha: 16
+lora_dropout: 0.05
+lora_fan_in_fan_out: null
+lora_model_dir: null
+lora_r: 32
+lora_target_linear: true
+lora_target_modules: null
+lr_scheduler: cosine
+micro_batch_size: 8
+model_type: LlamaForCausalLM
+num_epochs: 4
+optimizer: paged_adamw_32bit
+output_dir: ./outputs/qlora-out
+pad_to_sequence_len: false
+resume_from_checkpoint: null
+sample_packing: false
+saves_per_epoch: 1
+sequence_len: 4096
+special_tokens: null
+strict: false
+tf32: false
+tokenizer_type: LlamaTokenizer
+train_on_inputs: false
+val_set_size: 0.05
+wandb_entity: null
+wandb_log_model: null
+wandb_name: null
+wandb_project: null
+wandb_watch: null
+warmup_steps: 10
+weight_decay: 0.0
+xformers_attention: null
+
+```
+
+
+
+# outputs/qlora-out
+
+This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the None dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.7613
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0002
+- train_batch_size: 8
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 10
+- num_epochs: 4
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 1.768 | 0.0336 | 1 | 1.8649 |
+| 1.8084 | 0.2689 | 8 | 1.8317 |
+| 1.633 | 0.5378 | 16 | 1.7833 |
+| 1.6737 | 0.8067 | 24 | 1.7644 |
+| 1.6722 | 1.0756 | 32 | 1.7601 |
+| 1.7162 | 1.3445 | 40 | 1.7571 |
+| 1.7046 | 1.6134 | 48 | 1.7558 |
+| 1.6714 | 1.8824 | 56 | 1.7564 |
+| 1.6249 | 2.1513 | 64 | 1.7566 |
+| 1.5604 | 2.4202 | 72 | 1.7599 |
+| 1.7003 | 2.6891 | 80 | 1.7614 |
+| 1.7115 | 2.9580 | 88 | 1.7605 |
+| 1.5937 | 3.2269 | 96 | 1.7609 |
+| 1.655 | 3.4958 | 104 | 1.7612 |
+| 1.5829 | 3.7647 | 112 | 1.7613 |
+
+
+### Framework versions
+
+- PEFT 0.11.1
+- Transformers 4.41.1
+- Pytorch 2.1.2+cu121
+- Datasets 2.19.1
+- Tokenizers 0.19.1
\ No newline at end of file
diff --git a/content/outputs/qlora-out/adapter_config.json b/content/outputs/qlora-out/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/content/outputs/qlora-out/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/content/outputs/qlora-out/adapter_model.bin b/content/outputs/qlora-out/adapter_model.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8ed705314a209e3238104543c20f116844bb81be
--- /dev/null
+++ b/content/outputs/qlora-out/adapter_model.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a07fceb0f4a213b192376861226c495015857f3a4db9a7662c58acf847cac9b7
+size 50573978
diff --git a/content/outputs/qlora-out/checkpoint-116/README.md b/content/outputs/qlora-out/checkpoint-116/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-116/adapter_config.json b/content/outputs/qlora-out/checkpoint-116/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-116/adapter_model.safetensors b/content/outputs/qlora-out/checkpoint-116/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c2f8ae14c1c003567ecce01c661cf0c25ebf0913
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8668d68993cbafac06993db9d1c83bc618398531aab80fdab4198b3b92fe2ccb
+size 50503848
diff --git a/content/outputs/qlora-out/checkpoint-116/optimizer.pt b/content/outputs/qlora-out/checkpoint-116/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8a196c686c309683333cfbd09c94e815862cb537
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13f111d1655d1c0c5f946c1a507f8decff70637f9ab53f6b1f6bc534501332c0
+size 202035450
diff --git a/content/outputs/qlora-out/checkpoint-116/rng_state.pth b/content/outputs/qlora-out/checkpoint-116/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e056ab6a565d11467a679306a9742ecdd4b928da
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edbf3bea1d4f693a677a0bf33551e5e3063cbd96faf6ce6d46f9d3b4ff62532d
+size 14244
diff --git a/content/outputs/qlora-out/checkpoint-116/scheduler.pt b/content/outputs/qlora-out/checkpoint-116/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f09aa9eb766b3356ec7e327795e67e35471f1984
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68424c30349bda77a4657d1902f52c5a348248ab657a58e4aee2ebfc370c25ab
+size 1064
diff --git a/content/outputs/qlora-out/checkpoint-116/special_tokens_map.json b/content/outputs/qlora-out/checkpoint-116/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+    "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+    "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+  "pad_token": "</s>",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/content/outputs/qlora-out/checkpoint-116/tokenizer.model b/content/outputs/qlora-out/checkpoint-116/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/content/outputs/qlora-out/checkpoint-116/tokenizer_config.json b/content/outputs/qlora-out/checkpoint-116/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+      "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+      "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+      "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+  "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/content/outputs/qlora-out/checkpoint-116/trainer_state.json b/content/outputs/qlora-out/checkpoint-116/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e65f658dc6fd56e9f30910877602ef09385680e
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/trainer_state.json
@@ -0,0 +1,965 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.899159663865546,
+ "eval_steps": 8,
+ "global_step": 116,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ },
+ {
+ "epoch": 1.0084033613445378,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001829405685450202,
+ "loss": 1.7947,
+ "step": 30
+ },
+ {
+ "epoch": 1.0420168067226891,
+ "grad_norm": 0.20703125,
+ "learning_rate": 0.00018124868780056814,
+ "loss": 1.6887,
+ "step": 31
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 0.00017948544414133534,
+ "loss": 1.6722,
+ "step": 32
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "eval_loss": 1.7600828409194946,
+ "eval_runtime": 19.7105,
+ "eval_samples_per_second": 2.537,
+ "eval_steps_per_second": 0.355,
+ "step": 32
+ },
+ {
+ "epoch": 1.1092436974789917,
+ "grad_norm": 0.09814453125,
+ "learning_rate": 0.00017765238627180424,
+ "loss": 1.7145,
+ "step": 33
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00017575112421616202,
+ "loss": 1.6609,
+ "step": 34
+ },
+ {
+ "epoch": 1.1764705882352942,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00017378332790417273,
+ "loss": 1.6681,
+ "step": 35
+ },
+ {
+ "epoch": 1.2100840336134453,
+ "grad_norm": 0.11767578125,
+ "learning_rate": 0.00017175072570443312,
+ "loss": 1.6641,
+ "step": 36
+ },
+ {
+ "epoch": 1.2436974789915967,
+ "grad_norm": 0.11376953125,
+ "learning_rate": 0.00016965510290629972,
+ "loss": 1.7011,
+ "step": 37
+ },
+ {
+ "epoch": 1.2773109243697478,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00016749830015182107,
+ "loss": 1.7171,
+ "step": 38
+ },
+ {
+ "epoch": 1.3109243697478992,
+ "grad_norm": 0.103515625,
+ "learning_rate": 0.00016528221181905217,
+ "loss": 1.6333,
+ "step": 39
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "grad_norm": 0.111328125,
+ "learning_rate": 0.00016300878435817113,
+ "loss": 1.7162,
+ "step": 40
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "eval_loss": 1.757140040397644,
+ "eval_runtime": 19.6485,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.356,
+ "step": 40
+ },
+ {
+ "epoch": 1.3781512605042017,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00016068001458185936,
+ "loss": 1.6501,
+ "step": 41
+ },
+ {
+ "epoch": 1.4117647058823528,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001582979479114472,
+ "loss": 1.6446,
+ "step": 42
+ },
+ {
+ "epoch": 1.4453781512605042,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00015586467658036524,
+ "loss": 1.7104,
+ "step": 43
+ },
+ {
+ "epoch": 1.4789915966386555,
+ "grad_norm": 0.109375,
+ "learning_rate": 0.0001533823377964791,
+ "loss": 1.6146,
+ "step": 44
+ },
+ {
+ "epoch": 1.5126050420168067,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00015085311186492206,
+ "loss": 1.6448,
+ "step": 45
+ },
+ {
+ "epoch": 1.5462184873949578,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00014827922027307451,
+ "loss": 1.6735,
+ "step": 46
+ },
+ {
+ "epoch": 1.5798319327731094,
+ "grad_norm": 0.1181640625,
+ "learning_rate": 0.0001456629237393713,
+ "loss": 1.6604,
+ "step": 47
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00014300652022765207,
+ "loss": 1.7046,
+ "step": 48
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "eval_loss": 1.7558497190475464,
+ "eval_runtime": 19.7723,
+ "eval_samples_per_second": 2.529,
+ "eval_steps_per_second": 0.354,
+ "step": 48
+ },
+ {
+ "epoch": 1.6470588235294117,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00014031234292879725,
+ "loss": 1.694,
+ "step": 49
+ },
+ {
+ "epoch": 1.680672268907563,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00013758275821142382,
+ "loss": 1.625,
+ "step": 50
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.0001348201635434399,
+ "loss": 1.6919,
+ "step": 51
+ },
+ {
+ "epoch": 1.7478991596638656,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.00013202698538628376,
+ "loss": 1.6777,
+ "step": 52
+ },
+ {
+ "epoch": 1.7815126050420167,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00012920567706369758,
+ "loss": 1.7762,
+ "step": 53
+ },
+ {
+ "epoch": 1.815126050420168,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00012635871660690676,
+ "loss": 1.6668,
+ "step": 54
+ },
+ {
+ "epoch": 1.8487394957983194,
+ "grad_norm": 0.125,
+ "learning_rate": 0.00012348860457809838,
+ "loss": 1.7061,
+ "step": 55
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00012059786187410984,
+ "loss": 1.6714,
+ "step": 56
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "eval_loss": 1.7563551664352417,
+ "eval_runtime": 19.6588,
+ "eval_samples_per_second": 2.543,
+ "eval_steps_per_second": 0.356,
+ "step": 56
+ },
+ {
+ "epoch": 1.9159663865546217,
+ "grad_norm": 0.1279296875,
+ "learning_rate": 0.0001176890275122573,
+ "loss": 1.6318,
+ "step": 57
+ },
+ {
+ "epoch": 1.949579831932773,
+ "grad_norm": 0.1318359375,
+ "learning_rate": 0.00011476465640024814,
+ "loss": 1.6693,
+ "step": 58
+ },
+ {
+ "epoch": 1.9831932773109244,
+ "grad_norm": 0.12353515625,
+ "learning_rate": 0.00011182731709213659,
+ "loss": 1.5927,
+ "step": 59
+ },
+ {
+ "epoch": 2.0168067226890756,
+ "grad_norm": 0.138671875,
+ "learning_rate": 0.00010887958953229349,
+ "loss": 1.6558,
+ "step": 60
+ },
+ {
+ "epoch": 2.0504201680672267,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.00010592406278937144,
+ "loss": 1.7352,
+ "step": 61
+ },
+ {
+ "epoch": 2.0840336134453783,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00010296333278225599,
+ "loss": 1.6216,
+ "step": 62
+ },
+ {
+ "epoch": 2.1176470588235294,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.0001,
+ "loss": 1.6365,
+ "step": 63
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 9.703666721774402e-05,
+ "loss": 1.6249,
+ "step": 64
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "eval_loss": 1.756639838218689,
+ "eval_runtime": 19.5951,
+ "eval_samples_per_second": 2.552,
+ "eval_steps_per_second": 0.357,
+ "step": 64
+ },
+ {
+ "epoch": 2.184873949579832,
+ "grad_norm": 0.1357421875,
+ "learning_rate": 9.407593721062859e-05,
+ "loss": 1.653,
+ "step": 65
+ },
+ {
+ "epoch": 2.2184873949579833,
+ "grad_norm": 0.146484375,
+ "learning_rate": 9.112041046770653e-05,
+ "loss": 1.6545,
+ "step": 66
+ },
+ {
+ "epoch": 2.2521008403361344,
+ "grad_norm": 0.13671875,
+ "learning_rate": 8.817268290786343e-05,
+ "loss": 1.5787,
+ "step": 67
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 0.1328125,
+ "learning_rate": 8.523534359975189e-05,
+ "loss": 1.6532,
+ "step": 68
+ },
+ {
+ "epoch": 2.3193277310924367,
+ "grad_norm": 0.1396484375,
+ "learning_rate": 8.231097248774274e-05,
+ "loss": 1.6784,
+ "step": 69
+ },
+ {
+ "epoch": 2.3529411764705883,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 7.940213812589018e-05,
+ "loss": 1.5721,
+ "step": 70
+ },
+ {
+ "epoch": 2.3865546218487395,
+ "grad_norm": 0.1416015625,
+ "learning_rate": 7.651139542190164e-05,
+ "loss": 1.5836,
+ "step": 71
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "grad_norm": 0.146484375,
+ "learning_rate": 7.364128339309326e-05,
+ "loss": 1.5604,
+ "step": 72
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "eval_loss": 1.7598735094070435,
+ "eval_runtime": 19.7508,
+ "eval_samples_per_second": 2.532,
+ "eval_steps_per_second": 0.354,
+ "step": 72
+ },
+ {
+ "epoch": 2.453781512605042,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 7.079432293630244e-05,
+ "loss": 1.6259,
+ "step": 73
+ },
+ {
+ "epoch": 2.4873949579831933,
+ "grad_norm": 0.1484375,
+ "learning_rate": 6.797301461371625e-05,
+ "loss": 1.5811,
+ "step": 74
+ },
+ {
+ "epoch": 2.5210084033613445,
+ "grad_norm": 0.14453125,
+ "learning_rate": 6.517983645656014e-05,
+ "loss": 1.4929,
+ "step": 75
+ },
+ {
+ "epoch": 2.5546218487394956,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 6.24172417885762e-05,
+ "loss": 1.7014,
+ "step": 76
+ },
+ {
+ "epoch": 2.588235294117647,
+ "grad_norm": 0.1484375,
+ "learning_rate": 5.96876570712028e-05,
+ "loss": 1.5623,
+ "step": 77
+ },
+ {
+ "epoch": 2.6218487394957983,
+ "grad_norm": 0.1474609375,
+ "learning_rate": 5.699347977234799e-05,
+ "loss": 1.6006,
+ "step": 78
+ },
+ {
+ "epoch": 2.6554621848739495,
+ "grad_norm": 0.150390625,
+ "learning_rate": 5.43370762606287e-05,
+ "loss": 1.6641,
+ "step": 79
+ },
+ {
+ "epoch": 2.689075630252101,
+ "grad_norm": 0.15234375,
+ "learning_rate": 5.172077972692553e-05,
+ "loss": 1.7003,
+ "step": 80
+ },
+ {
+ "epoch": 2.689075630252101,
+ "eval_loss": 1.761399269104004,
+ "eval_runtime": 19.6692,
+ "eval_samples_per_second": 2.542,
+ "eval_steps_per_second": 0.356,
+ "step": 80
+ },
+ {
+ "epoch": 2.722689075630252,
+ "grad_norm": 0.154296875,
+ "learning_rate": 4.914688813507797e-05,
+ "loss": 1.6923,
+ "step": 81
+ },
+ {
+ "epoch": 2.7563025210084033,
+ "grad_norm": 0.158203125,
+ "learning_rate": 4.661766220352097e-05,
+ "loss": 1.6819,
+ "step": 82
+ },
+ {
+ "epoch": 2.7899159663865545,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 4.4135323419634766e-05,
+ "loss": 1.5649,
+ "step": 83
+ },
+ {
+ "epoch": 2.8235294117647056,
+ "grad_norm": 0.1494140625,
+ "learning_rate": 4.170205208855281e-05,
+ "loss": 1.608,
+ "step": 84
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 0.1611328125,
+ "learning_rate": 3.931998541814069e-05,
+ "loss": 1.5474,
+ "step": 85
+ },
+ {
+ "epoch": 2.8907563025210083,
+ "grad_norm": 0.1552734375,
+ "learning_rate": 3.69912156418289e-05,
+ "loss": 1.6484,
+ "step": 86
+ },
+ {
+ "epoch": 2.92436974789916,
+ "grad_norm": 0.1484375,
+ "learning_rate": 3.471778818094785e-05,
+ "loss": 1.6145,
+ "step": 87
+ },
+ {
+ "epoch": 2.957983193277311,
+ "grad_norm": 0.158203125,
+ "learning_rate": 3.250169984817897e-05,
+ "loss": 1.7115,
+ "step": 88
+ },
+ {
+ "epoch": 2.957983193277311,
+ "eval_loss": 1.7605273723602295,
+ "eval_runtime": 19.7632,
+ "eval_samples_per_second": 2.53,
+ "eval_steps_per_second": 0.354,
+ "step": 88
+ },
+ {
+ "epoch": 2.991596638655462,
+ "grad_norm": 0.1591796875,
+ "learning_rate": 3.034489709370033e-05,
+ "loss": 1.6485,
+ "step": 89
+ },
+ {
+ "epoch": 3.0252100840336134,
+ "grad_norm": 0.16015625,
+ "learning_rate": 2.8249274295566864e-05,
+ "loss": 1.5097,
+ "step": 90
+ },
+ {
+ "epoch": 3.0588235294117645,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 2.6216672095827266e-05,
+ "loss": 1.5918,
+ "step": 91
+ },
+ {
+ "epoch": 3.092436974789916,
+ "grad_norm": 0.1484375,
+ "learning_rate": 2.4248875783837987e-05,
+ "loss": 1.6116,
+ "step": 92
+ },
+ {
+ "epoch": 3.1260504201680672,
+ "grad_norm": 0.1552734375,
+ "learning_rate": 2.234761372819577e-05,
+ "loss": 1.5989,
+ "step": 93
+ },
+ {
+ "epoch": 3.1596638655462184,
+ "grad_norm": 0.154296875,
+ "learning_rate": 2.0514555858664663e-05,
+ "loss": 1.6061,
+ "step": 94
+ },
+ {
+ "epoch": 3.19327731092437,
+ "grad_norm": 0.16015625,
+ "learning_rate": 1.875131219943187e-05,
+ "loss": 1.549,
+ "step": 95
+ },
+ {
+ "epoch": 3.226890756302521,
+ "grad_norm": 0.15625,
+ "learning_rate": 1.7059431454979824e-05,
+ "loss": 1.5937,
+ "step": 96
+ },
+ {
+ "epoch": 3.226890756302521,
+ "eval_loss": 1.7609126567840576,
+ "eval_runtime": 19.7273,
+ "eval_samples_per_second": 2.535,
+ "eval_steps_per_second": 0.355,
+ "step": 96
+ },
+ {
+ "epoch": 3.2605042016806722,
+ "grad_norm": 0.1611328125,
+ "learning_rate": 1.5440399649817385e-05,
+ "loss": 1.6636,
+ "step": 97
+ },
+ {
+ "epoch": 3.2941176470588234,
+ "grad_norm": 0.15625,
+ "learning_rate": 1.3895638823264446e-05,
+ "loss": 1.6452,
+ "step": 98
+ },
+ {
+ "epoch": 3.327731092436975,
+ "grad_norm": 0.146484375,
+ "learning_rate": 1.2426505780436326e-05,
+ "loss": 1.6122,
+ "step": 99
+ },
+ {
+ "epoch": 3.361344537815126,
+ "grad_norm": 0.154296875,
+ "learning_rate": 1.103429090052528e-05,
+ "loss": 1.7153,
+ "step": 100
+ },
+ {
+ "epoch": 3.3949579831932772,
+ "grad_norm": 0.154296875,
+ "learning_rate": 9.720217003425647e-06,
+ "loss": 1.5094,
+ "step": 101
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 0.146484375,
+ "learning_rate": 8.485438275698154e-06,
+ "loss": 1.5703,
+ "step": 102
+ },
+ {
+ "epoch": 3.46218487394958,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 7.331039256816663e-06,
+ "loss": 1.5971,
+ "step": 103
+ },
+ {
+ "epoch": 3.495798319327731,
+ "grad_norm": 0.158203125,
+ "learning_rate": 6.258033886587911e-06,
+ "loss": 1.655,
+ "step": 104
+ },
+ {
+ "epoch": 3.495798319327731,
+ "eval_loss": 1.7612451314926147,
+ "eval_runtime": 19.7385,
+ "eval_samples_per_second": 2.533,
+ "eval_steps_per_second": 0.355,
+ "step": 104
+ },
+ {
+ "epoch": 3.5294117647058822,
+ "grad_norm": 0.158203125,
+ "learning_rate": 5.267364614580861e-06,
+ "loss": 1.6928,
+ "step": 105
+ },
+ {
+ "epoch": 3.5630252100840334,
+ "grad_norm": 0.1474609375,
+ "learning_rate": 4.359901572347758e-06,
+ "loss": 1.6083,
+ "step": 106
+ },
+ {
+ "epoch": 3.596638655462185,
+ "grad_norm": 0.1591796875,
+ "learning_rate": 3.5364418091641373e-06,
+ "loss": 1.6547,
+ "step": 107
+ },
+ {
+ "epoch": 3.630252100840336,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 2.7977085919589254e-06,
+ "loss": 1.5425,
+ "step": 108
+ },
+ {
+ "epoch": 3.6638655462184873,
+ "grad_norm": 0.154296875,
+ "learning_rate": 2.144350770049597e-06,
+ "loss": 1.5763,
+ "step": 109
+ },
+ {
+ "epoch": 3.697478991596639,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 1.576942205240317e-06,
+ "loss": 1.513,
+ "step": 110
+ },
+ {
+ "epoch": 3.73109243697479,
+ "grad_norm": 0.15234375,
+ "learning_rate": 1.0959812677835968e-06,
+ "loss": 1.5338,
+ "step": 111
+ },
+ {
+ "epoch": 3.764705882352941,
+ "grad_norm": 0.150390625,
+ "learning_rate": 7.018903986483083e-07,
+ "loss": 1.5829,
+ "step": 112
+ },
+ {
+ "epoch": 3.764705882352941,
+ "eval_loss": 1.7612619400024414,
+ "eval_runtime": 19.83,
+ "eval_samples_per_second": 2.521,
+ "eval_steps_per_second": 0.353,
+ "step": 112
+ },
+ {
+ "epoch": 3.7983193277310923,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 3.950157384783104e-07,
+ "loss": 1.5591,
+ "step": 113
+ },
+ {
+ "epoch": 3.831932773109244,
+ "grad_norm": 0.1533203125,
+ "learning_rate": 1.7562682356786487e-07,
+ "loss": 1.6213,
+ "step": 114
+ },
+ {
+ "epoch": 3.865546218487395,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 4.391634912056519e-08,
+ "loss": 1.6246,
+ "step": 115
+ },
+ {
+ "epoch": 3.899159663865546,
+ "grad_norm": 0.1591796875,
+ "learning_rate": 0.0,
+ "loss": 1.5533,
+ "step": 116
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.3243100413820928e+16,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/content/outputs/qlora-out/checkpoint-116/training_args.bin b/content/outputs/qlora-out/checkpoint-116/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-116/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/content/outputs/qlora-out/checkpoint-29/README.md b/content/outputs/qlora-out/checkpoint-29/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-29/adapter_config.json b/content/outputs/qlora-out/checkpoint-29/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-29/adapter_model.safetensors b/content/outputs/qlora-out/checkpoint-29/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5e0844b9cf95a1f91107276689c6a204f10d1830
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f3306c7c3271b6df472652a3e758371296cdd9260f28eae96c6d5c93e8f261b
+size 50503848
diff --git a/content/outputs/qlora-out/checkpoint-29/optimizer.pt b/content/outputs/qlora-out/checkpoint-29/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a18c2f4e2286fb9f8b4470cf86d94bbfe6610def
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60450a89e284ab0412512635b6713a0507d3a55dc20cb18cd142230fe92e18c8
+size 202035450
diff --git a/content/outputs/qlora-out/checkpoint-29/rng_state.pth b/content/outputs/qlora-out/checkpoint-29/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b6fa1b1b3c6ac0284a020fecd5590ae0ab72dea9
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5dff5ca7ee6d9737c0fb532125f4108aa3bd942be0c6c415c0eee299436cfee
+size 14244
diff --git a/content/outputs/qlora-out/checkpoint-29/scheduler.pt b/content/outputs/qlora-out/checkpoint-29/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6f063c1a6852ef8c3a95f27c4a7fddee4c12090d
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82e0ea69a0f2d46a8611802e20de4ba9ab4c81307121d85f58745fed7e6bfae6
+size 1064
diff --git a/content/outputs/qlora-out/checkpoint-29/special_tokens_map.json b/content/outputs/qlora-out/checkpoint-29/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/content/outputs/qlora-out/checkpoint-29/tokenizer.model b/content/outputs/qlora-out/checkpoint-29/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/content/outputs/qlora-out/checkpoint-29/tokenizer_config.json b/content/outputs/qlora-out/checkpoint-29/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/content/outputs/qlora-out/checkpoint-29/trainer_state.json b/content/outputs/qlora-out/checkpoint-29/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..dd095afd7011bcf0f72a191bef8f28dee83a3a87
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/trainer_state.json
@@ -0,0 +1,268 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9747899159663865,
+ "eval_steps": 8,
+ "global_step": 29,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3366220896141312.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/content/outputs/qlora-out/checkpoint-29/training_args.bin b/content/outputs/qlora-out/checkpoint-29/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-29/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/content/outputs/qlora-out/checkpoint-58/README.md b/content/outputs/qlora-out/checkpoint-58/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-58/adapter_config.json b/content/outputs/qlora-out/checkpoint-58/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-58/adapter_model.safetensors b/content/outputs/qlora-out/checkpoint-58/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bfd700946ae485ed0a53abce0fa0d65c511cb467
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f07ef1ad6cfdd6061f1cad1529524d822d6c21409b40d6ca2c11e7a2ebd9b03f
+size 50503848
diff --git a/content/outputs/qlora-out/checkpoint-58/optimizer.pt b/content/outputs/qlora-out/checkpoint-58/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c700fe51d3f01608933be8ff25ecc184b8829f27
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:712ca45af3a346ebbc26929883fe200d0d4fa0625f5f02944a41b2721a0dac4f
+size 202035450
diff --git a/content/outputs/qlora-out/checkpoint-58/rng_state.pth b/content/outputs/qlora-out/checkpoint-58/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..709982b4f1bb181ce340f82c833859051709cf88
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e2714773be96a258d965b3d5be5d9e51b91e01c435077d016f8a6cad6f5455b
+size 14244
diff --git a/content/outputs/qlora-out/checkpoint-58/scheduler.pt b/content/outputs/qlora-out/checkpoint-58/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e377bc38d38b096d2bcf506ad1e35afcd94de1c
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ffd843f86241f649ae5523a8aa7c9d13157ef6bc9ba4fe819ec88f7f7923587
+size 1064
diff --git a/content/outputs/qlora-out/checkpoint-58/special_tokens_map.json b/content/outputs/qlora-out/checkpoint-58/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/content/outputs/qlora-out/checkpoint-58/tokenizer.model b/content/outputs/qlora-out/checkpoint-58/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/content/outputs/qlora-out/checkpoint-58/tokenizer_config.json b/content/outputs/qlora-out/checkpoint-58/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/content/outputs/qlora-out/checkpoint-58/trainer_state.json b/content/outputs/qlora-out/checkpoint-58/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..d3e2499c7d8b3c7b77bc47e28147ef2252b36342
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/trainer_state.json
@@ -0,0 +1,503 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.949579831932773,
+ "eval_steps": 8,
+ "global_step": 58,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ },
+ {
+ "epoch": 1.0084033613445378,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001829405685450202,
+ "loss": 1.7947,
+ "step": 30
+ },
+ {
+ "epoch": 1.0420168067226891,
+ "grad_norm": 0.20703125,
+ "learning_rate": 0.00018124868780056814,
+ "loss": 1.6887,
+ "step": 31
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 0.00017948544414133534,
+ "loss": 1.6722,
+ "step": 32
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "eval_loss": 1.7600828409194946,
+ "eval_runtime": 19.7105,
+ "eval_samples_per_second": 2.537,
+ "eval_steps_per_second": 0.355,
+ "step": 32
+ },
+ {
+ "epoch": 1.1092436974789917,
+ "grad_norm": 0.09814453125,
+ "learning_rate": 0.00017765238627180424,
+ "loss": 1.7145,
+ "step": 33
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00017575112421616202,
+ "loss": 1.6609,
+ "step": 34
+ },
+ {
+ "epoch": 1.1764705882352942,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00017378332790417273,
+ "loss": 1.6681,
+ "step": 35
+ },
+ {
+ "epoch": 1.2100840336134453,
+ "grad_norm": 0.11767578125,
+ "learning_rate": 0.00017175072570443312,
+ "loss": 1.6641,
+ "step": 36
+ },
+ {
+ "epoch": 1.2436974789915967,
+ "grad_norm": 0.11376953125,
+ "learning_rate": 0.00016965510290629972,
+ "loss": 1.7011,
+ "step": 37
+ },
+ {
+ "epoch": 1.2773109243697478,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00016749830015182107,
+ "loss": 1.7171,
+ "step": 38
+ },
+ {
+ "epoch": 1.3109243697478992,
+ "grad_norm": 0.103515625,
+ "learning_rate": 0.00016528221181905217,
+ "loss": 1.6333,
+ "step": 39
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "grad_norm": 0.111328125,
+ "learning_rate": 0.00016300878435817113,
+ "loss": 1.7162,
+ "step": 40
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "eval_loss": 1.757140040397644,
+ "eval_runtime": 19.6485,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.356,
+ "step": 40
+ },
+ {
+ "epoch": 1.3781512605042017,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00016068001458185936,
+ "loss": 1.6501,
+ "step": 41
+ },
+ {
+ "epoch": 1.4117647058823528,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001582979479114472,
+ "loss": 1.6446,
+ "step": 42
+ },
+ {
+ "epoch": 1.4453781512605042,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00015586467658036524,
+ "loss": 1.7104,
+ "step": 43
+ },
+ {
+ "epoch": 1.4789915966386555,
+ "grad_norm": 0.109375,
+ "learning_rate": 0.0001533823377964791,
+ "loss": 1.6146,
+ "step": 44
+ },
+ {
+ "epoch": 1.5126050420168067,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00015085311186492206,
+ "loss": 1.6448,
+ "step": 45
+ },
+ {
+ "epoch": 1.5462184873949578,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00014827922027307451,
+ "loss": 1.6735,
+ "step": 46
+ },
+ {
+ "epoch": 1.5798319327731094,
+ "grad_norm": 0.1181640625,
+ "learning_rate": 0.0001456629237393713,
+ "loss": 1.6604,
+ "step": 47
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00014300652022765207,
+ "loss": 1.7046,
+ "step": 48
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "eval_loss": 1.7558497190475464,
+ "eval_runtime": 19.7723,
+ "eval_samples_per_second": 2.529,
+ "eval_steps_per_second": 0.354,
+ "step": 48
+ },
+ {
+ "epoch": 1.6470588235294117,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00014031234292879725,
+ "loss": 1.694,
+ "step": 49
+ },
+ {
+ "epoch": 1.680672268907563,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00013758275821142382,
+ "loss": 1.625,
+ "step": 50
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.0001348201635434399,
+ "loss": 1.6919,
+ "step": 51
+ },
+ {
+ "epoch": 1.7478991596638656,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.00013202698538628376,
+ "loss": 1.6777,
+ "step": 52
+ },
+ {
+ "epoch": 1.7815126050420167,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00012920567706369758,
+ "loss": 1.7762,
+ "step": 53
+ },
+ {
+ "epoch": 1.815126050420168,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00012635871660690676,
+ "loss": 1.6668,
+ "step": 54
+ },
+ {
+ "epoch": 1.8487394957983194,
+ "grad_norm": 0.125,
+ "learning_rate": 0.00012348860457809838,
+ "loss": 1.7061,
+ "step": 55
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00012059786187410984,
+ "loss": 1.6714,
+ "step": 56
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "eval_loss": 1.7563551664352417,
+ "eval_runtime": 19.6588,
+ "eval_samples_per_second": 2.543,
+ "eval_steps_per_second": 0.356,
+ "step": 56
+ },
+ {
+ "epoch": 1.9159663865546217,
+ "grad_norm": 0.1279296875,
+ "learning_rate": 0.0001176890275122573,
+ "loss": 1.6318,
+ "step": 57
+ },
+ {
+ "epoch": 1.949579831932773,
+ "grad_norm": 0.1318359375,
+ "learning_rate": 0.00011476465640024814,
+ "loss": 1.6693,
+ "step": 58
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6643321582387200.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/content/outputs/qlora-out/checkpoint-58/training_args.bin b/content/outputs/qlora-out/checkpoint-58/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-58/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/content/outputs/qlora-out/checkpoint-87/README.md b/content/outputs/qlora-out/checkpoint-87/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1ccd431539a8f1507d8755a9c3ba5e5b2897978
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/README.md
@@ -0,0 +1,202 @@
+---
+library_name: peft
+base_model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+### Framework versions
+
+- PEFT 0.11.1
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-87/adapter_config.json b/content/outputs/qlora-out/checkpoint-87/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc457bf3bff0c77122f275fc2e3f1077b79e130e
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/adapter_config.json
@@ -0,0 +1,34 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "gate_proj",
+ "up_proj",
+ "down_proj",
+ "v_proj",
+ "o_proj",
+ "q_proj",
+ "k_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+}
\ No newline at end of file
diff --git a/content/outputs/qlora-out/checkpoint-87/adapter_model.safetensors b/content/outputs/qlora-out/checkpoint-87/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c1d20d28ff81e0ed0999329ceb1ef1f8b07218e3
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c119a868a2f168125d890295ff7e7d1e8709c364ae22393fd56670ef283bd84
+size 50503848
diff --git a/content/outputs/qlora-out/checkpoint-87/optimizer.pt b/content/outputs/qlora-out/checkpoint-87/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..585f4ca53e1081a671ad6811e9c49c8f0b15b380
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be530a8fa8568da1149a6eb3fa1e0d3e6641ab7b50fc6a724480c58020d8db51
+size 202035450
diff --git a/content/outputs/qlora-out/checkpoint-87/rng_state.pth b/content/outputs/qlora-out/checkpoint-87/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b879ea1ffc28828c28e813f2f6c0593a8ac8796a
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abee6b45a92cc12ab71fa5ef10750e86110e096fada3acf2955d86fe41f121d3
+size 14244
diff --git a/content/outputs/qlora-out/checkpoint-87/scheduler.pt b/content/outputs/qlora-out/checkpoint-87/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..769177b071d8fbc5d172d02f4a8b213b334b7c86
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96dfdc4ac2eb14a7a5283672324c02dd21283974614857cbf29d0d55e24e2b3f
+size 1064
diff --git a/content/outputs/qlora-out/checkpoint-87/special_tokens_map.json b/content/outputs/qlora-out/checkpoint-87/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/content/outputs/qlora-out/checkpoint-87/tokenizer.model b/content/outputs/qlora-out/checkpoint-87/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/content/outputs/qlora-out/checkpoint-87/tokenizer_config.json b/content/outputs/qlora-out/checkpoint-87/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/content/outputs/qlora-out/checkpoint-87/trainer_state.json b/content/outputs/qlora-out/checkpoint-87/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..82ed7f3515632072d49c2daaead6ee981950aea0
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/trainer_state.json
@@ -0,0 +1,730 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.92436974789916,
+ "eval_steps": 8,
+ "global_step": 87,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.03361344537815126,
+ "grad_norm": 0.115234375,
+ "learning_rate": 2e-05,
+ "loss": 1.768,
+ "step": 1
+ },
+ {
+ "epoch": 0.03361344537815126,
+ "eval_loss": 1.8648816347122192,
+ "eval_runtime": 18.2501,
+ "eval_samples_per_second": 2.74,
+ "eval_steps_per_second": 0.384,
+ "step": 1
+ },
+ {
+ "epoch": 0.06722689075630252,
+ "grad_norm": 0.10888671875,
+ "learning_rate": 4e-05,
+ "loss": 1.7838,
+ "step": 2
+ },
+ {
+ "epoch": 0.10084033613445378,
+ "grad_norm": 0.126953125,
+ "learning_rate": 6e-05,
+ "loss": 1.9413,
+ "step": 3
+ },
+ {
+ "epoch": 0.13445378151260504,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 8e-05,
+ "loss": 1.7757,
+ "step": 4
+ },
+ {
+ "epoch": 0.16806722689075632,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.0001,
+ "loss": 1.735,
+ "step": 5
+ },
+ {
+ "epoch": 0.20168067226890757,
+ "grad_norm": 0.10791015625,
+ "learning_rate": 0.00012,
+ "loss": 1.8269,
+ "step": 6
+ },
+ {
+ "epoch": 0.23529411764705882,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00014,
+ "loss": 1.8552,
+ "step": 7
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00016,
+ "loss": 1.8084,
+ "step": 8
+ },
+ {
+ "epoch": 0.2689075630252101,
+ "eval_loss": 1.8317129611968994,
+ "eval_runtime": 19.6984,
+ "eval_samples_per_second": 2.538,
+ "eval_steps_per_second": 0.355,
+ "step": 8
+ },
+ {
+ "epoch": 0.3025210084033613,
+ "grad_norm": 0.12255859375,
+ "learning_rate": 0.00018,
+ "loss": 1.7158,
+ "step": 9
+ },
+ {
+ "epoch": 0.33613445378151263,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0002,
+ "loss": 1.8702,
+ "step": 10
+ },
+ {
+ "epoch": 0.3697478991596639,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00019995608365087946,
+ "loss": 1.8307,
+ "step": 11
+ },
+ {
+ "epoch": 0.40336134453781514,
+ "grad_norm": 0.11474609375,
+ "learning_rate": 0.00019982437317643217,
+ "loss": 1.6583,
+ "step": 12
+ },
+ {
+ "epoch": 0.4369747899159664,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.0001996049842615217,
+ "loss": 1.6663,
+ "step": 13
+ },
+ {
+ "epoch": 0.47058823529411764,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00019929810960135172,
+ "loss": 1.7388,
+ "step": 14
+ },
+ {
+ "epoch": 0.5042016806722689,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.0001989040187322164,
+ "loss": 1.7485,
+ "step": 15
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00019842305779475968,
+ "loss": 1.633,
+ "step": 16
+ },
+ {
+ "epoch": 0.5378151260504201,
+ "eval_loss": 1.7832777500152588,
+ "eval_runtime": 19.6833,
+ "eval_samples_per_second": 2.54,
+ "eval_steps_per_second": 0.356,
+ "step": 16
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 0.12109375,
+ "learning_rate": 0.0001978556492299504,
+ "loss": 1.8373,
+ "step": 17
+ },
+ {
+ "epoch": 0.6050420168067226,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 0.0001972022914080411,
+ "loss": 1.6552,
+ "step": 18
+ },
+ {
+ "epoch": 0.6386554621848739,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019646355819083589,
+ "loss": 1.8113,
+ "step": 19
+ },
+ {
+ "epoch": 0.6722689075630253,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00019564009842765225,
+ "loss": 1.6544,
+ "step": 20
+ },
+ {
+ "epoch": 0.7058823529411765,
+ "grad_norm": 0.11669921875,
+ "learning_rate": 0.00019473263538541914,
+ "loss": 1.6649,
+ "step": 21
+ },
+ {
+ "epoch": 0.7394957983193278,
+ "grad_norm": 0.0986328125,
+ "learning_rate": 0.0001937419661134121,
+ "loss": 1.6868,
+ "step": 22
+ },
+ {
+ "epoch": 0.773109243697479,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00019266896074318334,
+ "loss": 1.7762,
+ "step": 23
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "grad_norm": 0.11279296875,
+ "learning_rate": 0.00019151456172430183,
+ "loss": 1.6737,
+ "step": 24
+ },
+ {
+ "epoch": 0.8067226890756303,
+ "eval_loss": 1.7643933296203613,
+ "eval_runtime": 19.6308,
+ "eval_samples_per_second": 2.547,
+ "eval_steps_per_second": 0.357,
+ "step": 24
+ },
+ {
+ "epoch": 0.8403361344537815,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 0.00019027978299657436,
+ "loss": 1.6401,
+ "step": 25
+ },
+ {
+ "epoch": 0.8739495798319328,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00018896570909947475,
+ "loss": 1.7068,
+ "step": 26
+ },
+ {
+ "epoch": 0.907563025210084,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.0001875734942195637,
+ "loss": 1.8112,
+ "step": 27
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.1162109375,
+ "learning_rate": 0.00018610436117673555,
+ "loss": 1.6596,
+ "step": 28
+ },
+ {
+ "epoch": 0.9747899159663865,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.0001845596003501826,
+ "loss": 1.7936,
+ "step": 29
+ },
+ {
+ "epoch": 1.0084033613445378,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001829405685450202,
+ "loss": 1.7947,
+ "step": 30
+ },
+ {
+ "epoch": 1.0420168067226891,
+ "grad_norm": 0.20703125,
+ "learning_rate": 0.00018124868780056814,
+ "loss": 1.6887,
+ "step": 31
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 0.00017948544414133534,
+ "loss": 1.6722,
+ "step": 32
+ },
+ {
+ "epoch": 1.0756302521008403,
+ "eval_loss": 1.7600828409194946,
+ "eval_runtime": 19.7105,
+ "eval_samples_per_second": 2.537,
+ "eval_steps_per_second": 0.355,
+ "step": 32
+ },
+ {
+ "epoch": 1.1092436974789917,
+ "grad_norm": 0.09814453125,
+ "learning_rate": 0.00017765238627180424,
+ "loss": 1.7145,
+ "step": 33
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00017575112421616202,
+ "loss": 1.6609,
+ "step": 34
+ },
+ {
+ "epoch": 1.1764705882352942,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00017378332790417273,
+ "loss": 1.6681,
+ "step": 35
+ },
+ {
+ "epoch": 1.2100840336134453,
+ "grad_norm": 0.11767578125,
+ "learning_rate": 0.00017175072570443312,
+ "loss": 1.6641,
+ "step": 36
+ },
+ {
+ "epoch": 1.2436974789915967,
+ "grad_norm": 0.11376953125,
+ "learning_rate": 0.00016965510290629972,
+ "loss": 1.7011,
+ "step": 37
+ },
+ {
+ "epoch": 1.2773109243697478,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00016749830015182107,
+ "loss": 1.7171,
+ "step": 38
+ },
+ {
+ "epoch": 1.3109243697478992,
+ "grad_norm": 0.103515625,
+ "learning_rate": 0.00016528221181905217,
+ "loss": 1.6333,
+ "step": 39
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "grad_norm": 0.111328125,
+ "learning_rate": 0.00016300878435817113,
+ "loss": 1.7162,
+ "step": 40
+ },
+ {
+ "epoch": 1.3445378151260505,
+ "eval_loss": 1.757140040397644,
+ "eval_runtime": 19.6485,
+ "eval_samples_per_second": 2.545,
+ "eval_steps_per_second": 0.356,
+ "step": 40
+ },
+ {
+ "epoch": 1.3781512605042017,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00016068001458185936,
+ "loss": 1.6501,
+ "step": 41
+ },
+ {
+ "epoch": 1.4117647058823528,
+ "grad_norm": 0.1240234375,
+ "learning_rate": 0.0001582979479114472,
+ "loss": 1.6446,
+ "step": 42
+ },
+ {
+ "epoch": 1.4453781512605042,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00015586467658036524,
+ "loss": 1.7104,
+ "step": 43
+ },
+ {
+ "epoch": 1.4789915966386555,
+ "grad_norm": 0.109375,
+ "learning_rate": 0.0001533823377964791,
+ "loss": 1.6146,
+ "step": 44
+ },
+ {
+ "epoch": 1.5126050420168067,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00015085311186492206,
+ "loss": 1.6448,
+ "step": 45
+ },
+ {
+ "epoch": 1.5462184873949578,
+ "grad_norm": 0.11572265625,
+ "learning_rate": 0.00014827922027307451,
+ "loss": 1.6735,
+ "step": 46
+ },
+ {
+ "epoch": 1.5798319327731094,
+ "grad_norm": 0.1181640625,
+ "learning_rate": 0.0001456629237393713,
+ "loss": 1.6604,
+ "step": 47
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "grad_norm": 0.12060546875,
+ "learning_rate": 0.00014300652022765207,
+ "loss": 1.7046,
+ "step": 48
+ },
+ {
+ "epoch": 1.6134453781512605,
+ "eval_loss": 1.7558497190475464,
+ "eval_runtime": 19.7723,
+ "eval_samples_per_second": 2.529,
+ "eval_steps_per_second": 0.354,
+ "step": 48
+ },
+ {
+ "epoch": 1.6470588235294117,
+ "grad_norm": 0.1259765625,
+ "learning_rate": 0.00014031234292879725,
+ "loss": 1.694,
+ "step": 49
+ },
+ {
+ "epoch": 1.680672268907563,
+ "grad_norm": 0.1220703125,
+ "learning_rate": 0.00013758275821142382,
+ "loss": 1.625,
+ "step": 50
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.0001348201635434399,
+ "loss": 1.6919,
+ "step": 51
+ },
+ {
+ "epoch": 1.7478991596638656,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.00013202698538628376,
+ "loss": 1.6777,
+ "step": 52
+ },
+ {
+ "epoch": 1.7815126050420167,
+ "grad_norm": 0.12890625,
+ "learning_rate": 0.00012920567706369758,
+ "loss": 1.7762,
+ "step": 53
+ },
+ {
+ "epoch": 1.815126050420168,
+ "grad_norm": 0.126953125,
+ "learning_rate": 0.00012635871660690676,
+ "loss": 1.6668,
+ "step": 54
+ },
+ {
+ "epoch": 1.8487394957983194,
+ "grad_norm": 0.125,
+ "learning_rate": 0.00012348860457809838,
+ "loss": 1.7061,
+ "step": 55
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00012059786187410984,
+ "loss": 1.6714,
+ "step": 56
+ },
+ {
+ "epoch": 1.8823529411764706,
+ "eval_loss": 1.7563551664352417,
+ "eval_runtime": 19.6588,
+ "eval_samples_per_second": 2.543,
+ "eval_steps_per_second": 0.356,
+ "step": 56
+ },
+ {
+ "epoch": 1.9159663865546217,
+ "grad_norm": 0.1279296875,
+ "learning_rate": 0.0001176890275122573,
+ "loss": 1.6318,
+ "step": 57
+ },
+ {
+ "epoch": 1.949579831932773,
+ "grad_norm": 0.1318359375,
+ "learning_rate": 0.00011476465640024814,
+ "loss": 1.6693,
+ "step": 58
+ },
+ {
+ "epoch": 1.9831932773109244,
+ "grad_norm": 0.12353515625,
+ "learning_rate": 0.00011182731709213659,
+ "loss": 1.5927,
+ "step": 59
+ },
+ {
+ "epoch": 2.0168067226890756,
+ "grad_norm": 0.138671875,
+ "learning_rate": 0.00010887958953229349,
+ "loss": 1.6558,
+ "step": 60
+ },
+ {
+ "epoch": 2.0504201680672267,
+ "grad_norm": 0.134765625,
+ "learning_rate": 0.00010592406278937144,
+ "loss": 1.7352,
+ "step": 61
+ },
+ {
+ "epoch": 2.0840336134453783,
+ "grad_norm": 0.1328125,
+ "learning_rate": 0.00010296333278225599,
+ "loss": 1.6216,
+ "step": 62
+ },
+ {
+ "epoch": 2.1176470588235294,
+ "grad_norm": 0.1201171875,
+ "learning_rate": 0.0001,
+ "loss": 1.6365,
+ "step": 63
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "grad_norm": 0.1298828125,
+ "learning_rate": 9.703666721774402e-05,
+ "loss": 1.6249,
+ "step": 64
+ },
+ {
+ "epoch": 2.1512605042016806,
+ "eval_loss": 1.756639838218689,
+ "eval_runtime": 19.5951,
+ "eval_samples_per_second": 2.552,
+ "eval_steps_per_second": 0.357,
+ "step": 64
+ },
+ {
+ "epoch": 2.184873949579832,
+ "grad_norm": 0.1357421875,
+ "learning_rate": 9.407593721062859e-05,
+ "loss": 1.653,
+ "step": 65
+ },
+ {
+ "epoch": 2.2184873949579833,
+ "grad_norm": 0.146484375,
+ "learning_rate": 9.112041046770653e-05,
+ "loss": 1.6545,
+ "step": 66
+ },
+ {
+ "epoch": 2.2521008403361344,
+ "grad_norm": 0.13671875,
+ "learning_rate": 8.817268290786343e-05,
+ "loss": 1.5787,
+ "step": 67
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 0.1328125,
+ "learning_rate": 8.523534359975189e-05,
+ "loss": 1.6532,
+ "step": 68
+ },
+ {
+ "epoch": 2.3193277310924367,
+ "grad_norm": 0.1396484375,
+ "learning_rate": 8.231097248774274e-05,
+ "loss": 1.6784,
+ "step": 69
+ },
+ {
+ "epoch": 2.3529411764705883,
+ "grad_norm": 0.1337890625,
+ "learning_rate": 7.940213812589018e-05,
+ "loss": 1.5721,
+ "step": 70
+ },
+ {
+ "epoch": 2.3865546218487395,
+ "grad_norm": 0.1416015625,
+ "learning_rate": 7.651139542190164e-05,
+ "loss": 1.5836,
+ "step": 71
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "grad_norm": 0.146484375,
+ "learning_rate": 7.364128339309326e-05,
+ "loss": 1.5604,
+ "step": 72
+ },
+ {
+ "epoch": 2.4201680672268906,
+ "eval_loss": 1.7598735094070435,
+ "eval_runtime": 19.7508,
+ "eval_samples_per_second": 2.532,
+ "eval_steps_per_second": 0.354,
+ "step": 72
+ },
+ {
+ "epoch": 2.453781512605042,
+ "grad_norm": 0.1455078125,
+ "learning_rate": 7.079432293630244e-05,
+ "loss": 1.6259,
+ "step": 73
+ },
+ {
+ "epoch": 2.4873949579831933,
+ "grad_norm": 0.1484375,
+ "learning_rate": 6.797301461371625e-05,
+ "loss": 1.5811,
+ "step": 74
+ },
+ {
+ "epoch": 2.5210084033613445,
+ "grad_norm": 0.14453125,
+ "learning_rate": 6.517983645656014e-05,
+ "loss": 1.4929,
+ "step": 75
+ },
+ {
+ "epoch": 2.5546218487394956,
+ "grad_norm": 0.1572265625,
+ "learning_rate": 6.24172417885762e-05,
+ "loss": 1.7014,
+ "step": 76
+ },
+ {
+ "epoch": 2.588235294117647,
+ "grad_norm": 0.1484375,
+ "learning_rate": 5.96876570712028e-05,
+ "loss": 1.5623,
+ "step": 77
+ },
+ {
+ "epoch": 2.6218487394957983,
+ "grad_norm": 0.1474609375,
+ "learning_rate": 5.699347977234799e-05,
+ "loss": 1.6006,
+ "step": 78
+ },
+ {
+ "epoch": 2.6554621848739495,
+ "grad_norm": 0.150390625,
+ "learning_rate": 5.43370762606287e-05,
+ "loss": 1.6641,
+ "step": 79
+ },
+ {
+ "epoch": 2.689075630252101,
+ "grad_norm": 0.15234375,
+ "learning_rate": 5.172077972692553e-05,
+ "loss": 1.7003,
+ "step": 80
+ },
+ {
+ "epoch": 2.689075630252101,
+ "eval_loss": 1.761399269104004,
+ "eval_runtime": 19.6692,
+ "eval_samples_per_second": 2.542,
+ "eval_steps_per_second": 0.356,
+ "step": 80
+ },
+ {
+ "epoch": 2.722689075630252,
+ "grad_norm": 0.154296875,
+ "learning_rate": 4.914688813507797e-05,
+ "loss": 1.6923,
+ "step": 81
+ },
+ {
+ "epoch": 2.7563025210084033,
+ "grad_norm": 0.158203125,
+ "learning_rate": 4.661766220352097e-05,
+ "loss": 1.6819,
+ "step": 82
+ },
+ {
+ "epoch": 2.7899159663865545,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 4.4135323419634766e-05,
+ "loss": 1.5649,
+ "step": 83
+ },
+ {
+ "epoch": 2.8235294117647056,
+ "grad_norm": 0.1494140625,
+ "learning_rate": 4.170205208855281e-05,
+ "loss": 1.608,
+ "step": 84
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 0.1611328125,
+ "learning_rate": 3.931998541814069e-05,
+ "loss": 1.5474,
+ "step": 85
+ },
+ {
+ "epoch": 2.8907563025210083,
+ "grad_norm": 0.1552734375,
+ "learning_rate": 3.69912156418289e-05,
+ "loss": 1.6484,
+ "step": 86
+ },
+ {
+ "epoch": 2.92436974789916,
+ "grad_norm": 0.1484375,
+ "learning_rate": 3.471778818094785e-05,
+ "loss": 1.6145,
+ "step": 87
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 116,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 29,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 9933851341357056.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/content/outputs/qlora-out/checkpoint-87/training_args.bin b/content/outputs/qlora-out/checkpoint-87/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c07aacc14ae35e4961f88179b331f061deef6ee1
--- /dev/null
+++ b/content/outputs/qlora-out/checkpoint-87/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f22338230e6fb5499b3cb737d6ee991fc1b36ba21747e343dbc66d770479d2b
+size 5944
diff --git a/content/outputs/qlora-out/config.json b/content/outputs/qlora-out/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4fd46aded17c1e2a61e180b1bb90c4097c007cd9
--- /dev/null
+++ b/content/outputs/qlora-out/config.json
@@ -0,0 +1,44 @@
+{
+ "_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5632,
+ "max_position_embeddings": 4096,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 22,
+ "num_key_value_heads": 4,
+ "pretraining_tp": 1,
+ "quantization_config": {
+ "_load_in_4bit": true,
+ "_load_in_8bit": false,
+ "bnb_4bit_compute_dtype": "float32",
+ "bnb_4bit_quant_storage": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "llm_int8_enable_fp32_cpu_offload": false,
+ "llm_int8_has_fp16_weight": false,
+ "llm_int8_skip_modules": null,
+ "llm_int8_threshold": 6.0,
+ "load_in_4bit": true,
+ "load_in_8bit": false,
+ "quant_method": "bitsandbytes"
+ },
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.1",
+ "use_cache": false,
+ "vocab_size": 32000
+}
diff --git a/content/outputs/qlora-out/special_tokens_map.json b/content/outputs/qlora-out/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/content/outputs/qlora-out/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/content/outputs/qlora-out/tokenizer.model b/content/outputs/qlora-out/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/content/outputs/qlora-out/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/content/outputs/qlora-out/tokenizer_config.json b/content/outputs/qlora-out/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/content/outputs/qlora-out/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.model b/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0773857a13ba5a27453a0b462624fe76e8e82a86
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": true,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false,
+ "use_fast": true
+}