{
"dataset.debug": false,
"dataset.git_diff": "",
"dataset.git_sha1": "unknown",
"dataset.manual_sample_ids": [],
"dataset.max_read_items": null,
"dataset.output_dir": "output",
"dataset.path": "/mnt/hg_cache/temp_llama_dataset/datasets/ds_Llama-2-7b-chat-hf",
"dataset.read_eagle_format": false,
"dataset.run_name": "temp_run",
"dataset.seed": 42,
"dataset_generation.batch_size": 1,
"dataset_generation.debug": false,
"dataset_generation.debug_target": null,
"dataset_generation.ds_prefix": "ds_",
"dataset_generation.git_diff": "",
"dataset_generation.git_sha1": "unknown",
"dataset_generation.max_length": 4096,
"dataset_generation.output_dir": "output",
"dataset_generation.run_name": "temp_run",
"dataset_generation.save_every": 1000,
"dataset_generation.seed": 42,
"inference.debug": false,
"inference.detail_time_stats": false,
"inference.draft_tree_shape": "mc_sim_7b_64",
"inference.git_diff": "",
"inference.git_sha1": "unknown",
"inference.interactive": false,
"inference.max_new_tokens": 512,
"inference.mode": "speculative",
"inference.output_dir": "output",
"inference.run_name": "temp_run",
"inference.seed": 42,
"modeling.add_noise": true,
"modeling.attention_offset": "random.randrange(0, 3)",
"modeling.attention_wind": "5",
"modeling.ckpt_path": null,
"modeling.debug": false,
"modeling.decoder_key_remap": {},
"modeling.draft_growing": false,
"modeling.dtype": "torch.float32",
"modeling.frozen_targets": [],
"modeling.git_diff": "",
"modeling.git_sha1": "unknown",
"modeling.layer_path": "model.layers",
"modeling.lmhead_path": "lm_head",
"modeling.load_config_from_model_path": false,
"modeling.model_path": "beagle/models/llama2/Llama-2-7b-chat-hf/",
"modeling.norm_path": "model.norm",
"modeling.output_dir": "output",
"modeling.reuse_layer": null,
"modeling.rotary_path": "model.rotary_emb",
"modeling.run_name": "temp_run",
"modeling.save_loading": true,
"modeling.seed": 42,
"modeling.strictly_follow_eagle_decoder": true,
"modeling.tokenizer_path": "meta-llama/Llama-2-7b-chat-hf",
"modeling.use_fc_eagle": false,
"modeling.use_lower_layers": 0,
"modeling.use_state_distill": false,
"training.adam_beta1": 0.9,
"training.adam_beta2": 0.95,
"training.bf16": true,
"training.ddp_find_unused_parameters": false,
"training.debug": false,
"training.eval_steps": 100,
"training.eval_strategy": "steps",
"training.filter_out_shorts": false,
"training.git_diff": "diff --git a/README.md b/README.md\nindex 906761d..da3f70e 100644\n--- a/README.md\n+++ b/README.md\n@@ -21,13 +21,14 @@ Training:\n ```sh\n wandb login\n # multi-GPU training:\n-torchrun --standalone --nnodes=1 --nproc-per-node=2 -m beagle.train \\\n+CUDA_VISIBLE_DEVICES=0,1,2,3 \\\n+ torchrun --standalone --nnodes=1 --nproc-per-node=4 -m beagle.train \\\n --@llama2_7b_chat --@rtx4070tis_bs4_ctx4096 --modeling.use_lower_layers 0 \\\n --dataset.path /mnt/hg_cache/temp_llama_dataset/datasets/ds_Llama-2-7b-chat-hf \\\n --modeling.save_loading --training.report_to wandb\n \n # customized training:\n-CUDA_VISIBLE_DEVICES=1,2 \\\n+CUDA_VISIBLE_DEVICES=0,1 \\\n torchrun --standalone --nnodes=1 --nproc-per-node=2 -m beagle.train \\\n [email protected]_2.4B_instr --@rtx4070tis_dev_bs8 \\\n --dataset.path /mnt/truenas_sync/beagle_train_data/datasets/ds_EXAONE-3.5-2.4B-Instruct \\\ndiff --git a/beagle/train.py b/beagle/train.py\nindex 74795e1..c25096e 100644\n--- a/beagle/train.py\n+++ b/beagle/train.py\n@@ -199,8 +199,6 @@ def train(configs, run_name, tokenizer, model,\n if not configs.overwrite_output_dir:\n assert not os.path.exists(configs.output_dir), configs.output_dir\n \n- print('[rank]', rank, '/', world_size)\n-\n random.seed(configs.seed)\n torch.manual_seed(configs.seed)\n \n@@ -326,6 +324,9 @@ def main(config_file='beagle/configs.ini', **injects):\n configs.set_obj('training.git_diff', git_diff)\n master_print('[git]', git_sha1, '\\n', git_diff)\n \n+ print('[rank]', rank, '/', world_size)\n+ configs.set_obj('training.world_size', world_size)\n+\n if rank == 0 and configs.training.report_to == 'wandb':\n import wandb\n wandb.init(",
"training.git_sha1": "453d65d9cbee2ac0dedb03c3c49d99b1344a81ed",
"training.gradient_accumulation_steps": 8,
"training.learning_rate": 3e-05,
"training.logging_steps": 5,
"training.lr_scheduler_type": "constant_with_warmup",
"training.max_grad_norm": 0.5,
"training.max_length": 4096,
"training.max_steps": -1,
"training.model_init_ckpt": null,
"training.num_train_epochs": 10,
"training.optim": "adamw_torch_fused",
"training.output_dir": "output",
"training.overwrite_output_dir": true,
"training.per_device_eval_batch_size": 1,
"training.per_device_train_batch_size": 2,
"training.project": "beagle",
"training.report_to": "wandb",
"training.resume_from_checkpoint": false,
"training.resume_wandb_runid": null,
"training.run_name": "temp_run",
"training.save_steps": 500,
"training.save_strategy": "steps",
"training.save_total_limit": 2,
"training.save_vram": true,
"training.seed": 42,
"training.tf32": false,
"training.use_eagle_pipeline": false,
"training.warmup_steps": 2000,
"training.world_size": 1
}