|
---
license: gemma
datasets:
- kalomaze/Opus_Instruct_25k
base_model: google/gemma-2-2b-it
---
|
|
|
![image/png](https://cdn-uploads.huggingface.co/production/uploads/660e67afe23148df7ca321a5/AKOTTIQeLVokbsp_Lm7gP.png) |
|
|
|
Must put image in repo :3 |
|
|
|
# Basic info |
|
This is [unsloth/gemma-2-2b-it](https://huggingface.co/unsloth/gemma-2-2b-it) fine-tuned with LoRA on [kalomaze/Opus_Instruct_25k](https://huggingface.co/datasets/kalomaze/Opus_Instruct_25k)
|
|
|
It saw 39.5M tokens during training.
|
|
|
I have no idea if it's done right, but it took 9 hours B)
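
If you want to chat with it, something like this should work with 🤗 Transformers. The repo id below is a placeholder (swap in this repo's actual path), and it assumes the uploaded weights are a full merged model, or an adapter repo with `peft` installed:

```python
# Minimal inference sketch with 🤗 Transformers.
# NOTE: "your-username/gemma-2-2b-opus" is a made-up placeholder repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-username/gemma-2-2b-opus"  # placeholder: point this at the actual repo

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Gemma-2 uses user/model turns and has no system role.
messages = [{"role": "user", "content": "Write a short story about a fox."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```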
|
|
|
# Training config: |
|
```yaml
cutoff_len: 1024
dataset: Opus_Instruct_25K
dataset_dir: data
ddp_timeout: 180000000
do_train: true
finetuning_type: lora
flash_attn: auto
fp16: true
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 32
lora_dropout: 0
lora_rank: 32
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 15000
model_name_or_path: unsloth/gemma-2-2b-it
num_train_epochs: 3.0
optim: adamw_8bit
output_dir: saves/Gemma-2-2B-Chat/lora/Final_Opus
packing: false
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 4
quantization_method: bitsandbytes
report_to: none
save_steps: 100
stage: sft
template: gemma
use_unsloth: true
warmup_steps: 0
```
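
For reference, this looks like a LLaMA-Factory-style QLoRA setup: the base model is loaded in 4-bit with bitsandbytes and a rank-32 LoRA (alpha 32, `lora_target: all`) is trained on top via Unsloth. With `per_device_train_batch_size: 2` and `gradient_accumulation_steps: 8`, the effective batch size works out to 16. A rough sketch of re-attaching the trained adapter with PEFT, assuming the adapter files from `output_dir` are available locally (adjust the path to wherever they actually live):

```python
# Sketch of loading the base model in 4-bit (bitsandbytes) and attaching the LoRA
# adapter with PEFT, mirroring the quantization/LoRA settings in the config above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "unsloth/gemma-2-2b-it"
adapter_path = "saves/Gemma-2-2B-Chat/lora/Final_Opus"  # output_dir from the config

bnb_config = BitsAndBytesConfig(load_in_4bit=True)  # quantization_bit: 4, bitsandbytes

base = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    torch_dtype=torch.float16,  # fp16: true
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(base_id)

model = PeftModel.from_pretrained(base, adapter_path)
# model = model.merge_and_unload()  # only if you reload the base unquantized first
```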