{ "dataset.debug": false, "dataset.git_diff": "", "dataset.git_sha1": "unknown", "dataset.manual_sample_ids": [], "dataset.max_read_items": null, "dataset.output_dir": "output", "dataset.path": "/mnt/hg_cache/temp_llama_dataset/datasets/ds_Llama-2-7b-chat-hf", "dataset.read_eagle_format": false, "dataset.run_name": "temp_run", "dataset.seed": 42, "dataset_generation.batch_size": 1, "dataset_generation.debug": false, "dataset_generation.debug_target": null, "dataset_generation.ds_prefix": "ds_", "dataset_generation.git_diff": "", "dataset_generation.git_sha1": "unknown", "dataset_generation.max_length": 4096, "dataset_generation.output_dir": "output", "dataset_generation.run_name": "temp_run", "dataset_generation.save_every": 1000, "dataset_generation.seed": 42, "inference.debug": false, "inference.detail_time_stats": false, "inference.draft_tree_shape": "mc_sim_7b_65", "inference.git_diff": "", "inference.git_sha1": "unknown", "inference.interactive": false, "inference.max_new_tokens": 512, "inference.mode": "speculative", "inference.output_dir": "output", "inference.run_name": "temp_run", "inference.seed": 42, "modeling.add_noise": true, "modeling.attention_offset": "random.randrange(0, 3)", "modeling.attention_wind": "5", "modeling.ckpt_path": null, "modeling.debug": false, "modeling.decoder_key_remap": {}, "modeling.draft_growing": false, "modeling.dtype": "torch.float32", "modeling.frozen_targets": [], "modeling.git_diff": "", "modeling.git_sha1": "unknown", "modeling.layer_path": "model.layers", "modeling.lmhead_path": "lm_head", "modeling.load_config_from_model_path": false, "modeling.model_path": "beagle/models/llama2/Llama-2-7b-chat-hf/", "modeling.norm_path": "model.norm", "modeling.output_dir": "output", "modeling.reuse_layer": null, "modeling.rotary_path": "model.rotary_emb", "modeling.run_name": "temp_run", "modeling.save_loading": true, "modeling.seed": 42, "modeling.strictly_follow_eagle_decoder": false, "modeling.tokenizer_path": "meta-llama/Llama-2-7b-chat-hf", "modeling.use_fc_eagle": false, "modeling.use_lower_layers": 0, "modeling.use_state_distill": false, "training.adam_beta1": 0.9, "training.adam_beta2": 0.95, "training.bf16": true, "training.ddp_find_unused_parameters": false, "training.debug": false, "training.eval_steps": 100, "training.eval_strategy": "steps", "training.filter_out_shorts": false, "training.git_diff": "", "training.git_sha1": "5fc4490fcf6df905c32d0391251aba155c81b593", "training.gradient_accumulation_steps": 4, "training.learning_rate": 3e-05, "training.logging_steps": 1, "training.lr_scheduler_type": "constant_with_warmup", "training.max_grad_norm": 0.5, "training.max_length": 4096, "training.max_steps": -1, "training.model_init_ckpt": null, "training.num_train_epochs": 10, "training.optim": "adamw_torch_fused", "training.output_dir": "output", "training.overwrite_output_dir": true, "training.per_device_eval_batch_size": 1, "training.per_device_train_batch_size": 1, "training.project": "beagle", "training.report_to": "wandb", "training.resume_from_checkpoint": false, "training.resume_wandb_runid": null, "training.run_name": "temp_run", "training.save_steps": 500, "training.save_strategy": "steps", "training.save_total_limit": 2, "training.save_vram": true, "training.seed": 42, "training.tf32": false, "training.use_eagle_pipeline": false, "training.warmup_steps": 2000 }