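# LoRA fine-tune of EleutherAI/pythia-1.4b-deduped on the Alpaca-format
# teknium/GPT4-LLM-Cleaned dataset. The base model is loaded in 8-bit and
# only the LoRA adapter weights are trained.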
base_model: EleutherAI/pythia-1.4b-deduped
load_in_8bit: true
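
# Alpaca-format instruction data; 5% is held out as a validation split.
# Blank values throughout this file fall back to the trainer's defaults.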
datasets:
  - path: teknium/GPT4-LLM-Cleaned
    type: alpaca
dataset_prepared_path:
val_set_size: 0.05
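
# LoRA settings. GPTNeoX/Pythia fuses the Q/K/V projections into a single
# query_key_value module, so it is the only target here; the effective
# scaling is lora_alpha / lora_r = 2.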
adapter: lora
lora_model_dir:
sequence_len: 512
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_modules:
  - query_key_value
lora_target_linear:
lora_fan_in_fan_out: true # pythia/GPTNeoX lora specific
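
# Weights & Biases logging; all keys are left blank, so W&B is effectively off.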
wandb_project:
wandb_entity:
wandb_watch:
wandb_run_id:
wandb_log_model:
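
# Training hyperparameters: per-device batch of 4 with no gradient
# accumulation, 3 epochs at lr 1e-5. train_on_inputs: false masks prompt
# tokens from the loss so only completions are learned; bf16/tf32 enable
# mixed precision on Ampere-class or newer GPUs.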
output_dir: ./lora-alpaca-pythia
gradient_accumulation_steps: 1
micro_batch_size: 4
num_epochs: 3
learning_rate: 0.00001
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
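
# Early stopping, checkpoint resumption, and local_rank are left unset;
# evaluate every 20 steps and log every step.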
early_stopping_patience:
resume_from_checkpoint:
local_rank:
weight_decay: 0.1
eval_steps: 20
logging_steps: 1
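
# Example launch, assuming the axolotl trainer and that this file is saved
# as lora.yml (the exact CLI entrypoint varies by axolotl version):
#   accelerate launch -m axolotl.cli.train lora.yml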