model_family: llama2-7b
LoRA:
  r: 0
  alpha: 32
  dropout: 0.05
dataset: ZSRE
data_path: /data/user/whz/machine_unlearning/our_bench/data/ZSRE/finetune.json
split: full
batch_size: 4
gradient_accumulation_steps: 8
num_epochs: 10
lr: 3.0e-05
bf16: true
save_dir: /data/user/whz/machine_unlearning/our_bench/save_model/ZSRE/finetune_llama2-7b_B4_G8_E10_lr3e-5_2
weight_decay: 0.01
seed: 42
ds_size: null
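
A minimal sketch of how a training script might consume this config, assuming it is paired with Hugging Face transformers and peft (common for llama2-7b fine-tuning). The `load_config` helper and the config filename are hypothetical, not from the source; the key names match the YAML above. Note that `r: 0` plausibly disables the LoRA adapter, which is consistent with the `save_dir` naming this run a full fine-tune.

```python
# Hypothetical loader for the config above; assumes transformers + peft.
import yaml
from transformers import TrainingArguments
from peft import LoraConfig


def load_config(path: str) -> dict:
    # Parse the YAML config into a plain dict.
    with open(path) as f:
        return yaml.safe_load(f)


cfg = load_config("finetune.yaml")  # hypothetical filename

# Only build a LoRA adapter when the rank is positive; r: 0 in this
# config implies full-parameter fine-tuning instead of an adapter.
lora_cfg = None
if cfg["LoRA"]["r"] > 0:
    lora_cfg = LoraConfig(
        r=cfg["LoRA"]["r"],
        lora_alpha=cfg["LoRA"]["alpha"],
        lora_dropout=cfg["LoRA"]["dropout"],
        task_type="CAUSAL_LM",
    )

# Map the remaining hyperparameters onto TrainingArguments.
training_args = TrainingArguments(
    output_dir=cfg["save_dir"],
    per_device_train_batch_size=cfg["batch_size"],
    gradient_accumulation_steps=cfg["gradient_accumulation_steps"],
    num_train_epochs=cfg["num_epochs"],
    learning_rate=cfg["lr"],
    bf16=cfg["bf16"],
    weight_decay=cfg["weight_decay"],
    seed=cfg["seed"],
)
```

With `batch_size: 4` and `gradient_accumulation_steps: 8`, the effective batch size per optimizer step is 32, matching the `B4_G8_E10_lr3e-5` tag encoded in `save_dir`.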