diff --git a/configs/config_1.07G_dp1_tp8_pp1_acc1_mbs1_seq16384_zero0_tpmodeRED_l15_h2048_heads16.yaml b/configs/config_1.07G_dp1_tp8_pp1_acc1_mbs1_seq16384_zero0_tpmodeRED_l15_h2048_heads16.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c4ead841e1f39389ec0909be710e36a4121a7582
--- /dev/null
+++ b/configs/config_1.07G_dp1_tp8_pp1_acc1_mbs1_seq16384_zero0_tpmodeRED_l15_h2048_heads16.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.07G_dp1_tp8_pp1_acc1_mbs1_seq16384_zero0_tpmodeRED_l15_h2048_heads16
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 16384
+    num_attention_heads: 16
+    num_hidden_layers: 15
+    num_key_value_heads: 16
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 16384
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d4f13fb87142fe957da46cc0b775f2cd9184a8e9
--- /dev/null
+++ b/configs/config_1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp8_pp1_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp128_pp1_acc64_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp128_pp1_acc64_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..62c4c3926712f792e95645fdcd7fd2b2e5f1a3ef
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp128_pp1_acc64_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp128_pp1_acc64_mbs1_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 128
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp1_pp4_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp1_pp4_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e26168dcaa29f3e9943dfe31b76895b462ce0ae4
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp1_pp4_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp1_pp4_acc32_mbs2_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b593faffdebbf4df50ccf310a29e852b6c3ec031
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp4_pp1_acc128_mbs2_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d7851554992ede3d4c973fe93b44f6a06ecd0f2f
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp4_pp1_acc4_mbs16_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9916069458d5d057f6e3a4df0219cca9debdd5ff
--- /dev/null
+++ b/configs/config_1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp16_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..965424c1f72b8f65e3b062de1d2edc41942c248f
--- /dev/null
+++ b/configs/config_1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp32_tp8_pp1_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 32
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp128_pp1_acc4_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp128_pp1_acc4_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c24c0b849e01a6b19b1e1529ccdc0d3fea3b66bc
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp128_pp1_acc4_mbs32_seq8192_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp128_pp1_acc4_mbs32_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 128
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3db89bccbb5067918000fef6925eee3c282b361
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 128
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f275bd4b5930b8eeff329396db94be76f5e682d5
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp64_pp1_acc64_mbs2_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 64
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..61ac8758a598ab17b4c5cb2632d81a594d42d583
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b4b082f6685438c88c605ed9286e0f576e083e09
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp8_pp1_acc2_mbs256_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 256
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp64_tp8_pp1_acc4_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp8_pp1_acc4_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d729b3eebd70fe8cb324f1b1d1245be6bc146714
--- /dev/null
+++ b/configs/config_1.14G_dp64_tp8_pp1_acc4_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp64_tp8_pp1_acc4_mbs2_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf7c8dab5af5793d5a5fc6054c0be9c4b3883ea8
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp16_pp1_acc1_mbs16_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c0414e8154913d68dd5875f41171af55d641b2d
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp16_pp1_acc4_mbs4_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp1_pp2_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp1_pp2_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0c382d6628b551dbed218f0fbbbd1207a25bf18e
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp1_pp2_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp1_pp2_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf7b8258e0425c7a3c80237260d0c6d86fe1b3dc
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp32_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp4_pp1_acc1_mbs64_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp4_pp1_acc1_mbs64_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..abc55e839bcea253182b72d27db6839ff6eb8ac4
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp4_pp1_acc1_mbs64_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp4_pp1_acc1_mbs64_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp4_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp4_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a010f23effc058d75e170932ed750b7595c9034b
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp4_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp4_pp1_acc32_mbs8_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp128_tp4_pp1_acc1_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp128_tp4_pp1_acc1_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3ac13334c09f6f4ee3d71e1072b34e4891fd3b40
--- /dev/null
+++ b/configs/config_1.34G_dp128_tp4_pp1_acc1_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp128_tp4_pp1_acc1_mbs1_seq8192_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp1_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e89ec1beda61784ca7ff5bcb50653012597143f2
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp1_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp1_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..567a9b3b01934f09ca49cb83c5291bd77eedde97
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp1_pp2_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..22ef4574f346c96c67c3615fa972af6719262004
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp2_pp1_acc2_mbs1_seq32768_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f31f4d24d96cbafebded9bcde716e06735c70faa
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp2_pp1_acc2_mbs64_seq2048_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..03cc51bf1cde54af82a9df4aa72472637adf72bb
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp32_pp1_acc2_mbs16_seq8192_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp1_tp4_pp4_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp1_tp4_pp4_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9430007cb37720cb4b94bd0f7ff56dfd400238a8
--- /dev/null
+++ b/configs/config_1.34G_dp1_tp4_pp4_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp1_tp4_pp4_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67b486fbe7564a718aeb51b111192b191e82390a --- /dev/null +++ b/configs/config_1.34G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp16_pp1_acc32_mbs8_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp32_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp32_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a71fbf4d741408ff4fa77ad224d39e32e78752ec --- /dev/null +++ b/configs/config_1.34G_dp2_tp32_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 
1.34G_dp2_tp32_pp1_acc2_mbs128_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp4_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp4_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..330c09a4754dd7efe6d00e7dc9e27ee78a9eaa70 --- /dev/null +++ b/configs/config_1.34G_dp2_tp4_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp4_pp1_acc64_mbs16_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + 
adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..346bbbf3e501a27d56a5def0abfed4c836823073 --- /dev/null +++ b/configs/config_1.34G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp8_pp1_acc4_mbs256_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp16_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp32_tp16_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6343b7b1bf230468634e1ba6b919d9b6d6392668 --- /dev/null +++ b/configs/config_1.34G_dp32_tp16_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: 
checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp16_pp1_acc2_mbs8_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp8_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp8_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..526deedb7d15bb0fce919b60b7848d11b99538d0 --- /dev/null +++ b/configs/config_1.34G_dp32_tp8_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp8_pp1_acc1_mbs64_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + 
rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5ca6886368779916fcbc43c3195fa95a492ee40 --- /dev/null +++ b/configs/config_1.34G_dp32_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp8_pp1_acc2_mbs2_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_1.34G_dp4_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61a2f5be4fa115d9c1faa75ec87657def2c007aa --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc4_mbs8_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp1_pp2_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp1_pp2_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd6d07da3960d64db1fdae6c33d0d48bc7d047f2 --- /dev/null +++ b/configs/config_1.34G_dp4_tp1_pp2_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp1_pp2_acc256_mbs2_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info 
+model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp2_pp1_acc16_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp2_pp1_acc16_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..56058fecb1c12e329221799b0169d393ef3c09aa --- /dev/null +++ b/configs/config_1.34G_dp4_tp2_pp1_acc16_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp2_pp1_acc16_mbs8_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + 
tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp32_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp32_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..44abaee7a16dab8370ada4dc50089576d69e2077 --- /dev/null +++ b/configs/config_1.34G_dp4_tp32_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp32_pp1_acc16_mbs2_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0cbf3d892b67947ab10b62fd04404280fd7ed40a --- /dev/null +++ b/configs/config_1.34G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + 
num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp32_pp1_acc2_mbs16_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..04888a4a7ae49338a17e44ceb4014395a37b1605 --- /dev/null +++ b/configs/config_1.34G_dp4_tp32_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp32_pp1_acc4_mbs2_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + 
learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp32_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp32_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89f39563eac23feafddebb733ba6c12468e02163 --- /dev/null +++ b/configs/config_1.34G_dp4_tp32_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp32_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp2_pp1_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp64_tp2_pp1_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..0dd7b1b7ed18394820446b0ca5b30187c58c27b6 --- /dev/null +++ b/configs/config_1.34G_dp64_tp2_pp1_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp2_pp1_acc8_mbs1_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp64_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c8a3e19d373973e05f379af35a30e1b93bf2eea --- /dev/null +++ b/configs/config_1.34G_dp64_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp8_pp1_acc32_mbs1_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 
2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp1_pp1_acc16_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp1_pp1_acc16_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4782ca9f565a07161a6c4146fc580c9409017ba3 --- /dev/null +++ b/configs/config_1.34G_dp8_tp1_pp1_acc16_mbs2_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp1_pp1_acc16_mbs2_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null 
+tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3dce8aa0deaed7162a54b53ad0cdc5d88b26f41c --- /dev/null +++ b/configs/config_1.34G_dp8_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp1_pp8_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp2_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp2_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c678763417dba997bc3dbc6cefe0835a7a6f2d05 --- /dev/null +++ b/configs/config_1.34G_dp8_tp2_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + 
run: 1.34G_dp8_tp2_pp1_acc2_mbs32_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp2_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp2_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..154012e43ac865ca6358a7f272ed847f38560790 --- /dev/null +++ b/configs/config_1.34G_dp8_tp2_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp2_pp1_acc8_mbs2_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + 
adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp32_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp32_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a36062c753032ed3a8e555629139bd4c8142f80 --- /dev/null +++ b/configs/config_1.34G_dp8_tp32_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp32_pp1_acc1_mbs4_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..687ccb90ed74929f77a61fa9d962db3313f7f4c8 --- /dev/null +++ b/configs/config_1.34G_dp8_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: 
checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp64_pp1_acc8_mbs8_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp8_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp8_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee71298b341a4bfab567c390c52890ad1ebc3f91 --- /dev/null +++ b/configs/config_1.34G_dp8_tp8_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp8_pp1_acc1_mbs256_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: 
null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp2_tp32_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp2_tp32_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1fe41c9a2e6955b072eba16142422b02b0b9399a --- /dev/null +++ b/configs/config_3.57G_dp2_tp32_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp2_tp32_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_3.57G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d060fdbb0cd3abb7453db81279a1ba8eda1a0234 --- /dev/null +++ b/configs/config_3.57G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp4_pp1_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp8_tp4_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp4_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b35affbbefa708b0b7d049d011504d4d48566497 --- /dev/null +++ b/configs/config_3.57G_dp8_tp4_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp4_pp8_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info 
+model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp8_tp8_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp8_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..83ed1c0421579a0499da1b16e40fe162b738c8dc --- /dev/null +++ b/configs/config_3.57G_dp8_tp8_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp8_pp1_acc2_mbs16_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + 
tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..345f621be7da3eede36ef3b36bd052ca1ab4ac92 --- /dev/null +++ b/configs/config_3.57G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp16_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp16_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e01ece5de8e8de46cafb36915cedd3bc94569095 --- /dev/null +++ b/configs/config_469G_dp2_tp16_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 
+ name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp16_pp2_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53537ec7a33973ee12a26c6ab10c11fef55c5852 --- /dev/null +++ b/configs/config_469G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp32_tp1_pp1_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 
0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp4_tp2_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp4_tp2_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e86a3b8ee001ff8c80ab866221bd9c5ded71c92d --- /dev/null +++ b/configs/config_469G_dp4_tp2_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp4_tp2_pp2_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp4_tp4_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp4_tp4_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..e19e77f216b815ccc591bf796d2fe1d98f2a0792 --- /dev/null +++ b/configs/config_469G_dp4_tp4_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp4_tp4_pp32_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_5.5G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml b/configs/config_5.5G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c7f8227b965e36e85c27c950aa1fc95e1c5a107 --- /dev/null +++ b/configs/config_5.5G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 5.5G_dp8_tp2_pp32_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: 
silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp1_tp16_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp1_tp16_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..793e62c14b9a950c8fb5d2a11a5236b111c8d3e9 --- /dev/null +++ b/configs/config_8.86G_dp1_tp16_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp1_tp16_pp4_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: 
robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp2_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp2_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..74ab879ef3e3ac56728fa68c29ee9965518a1607 --- /dev/null +++ b/configs/config_8.86G_dp2_tp2_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp2_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp8_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp8_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f4f0c05922c7f10606ab187cb90a536d1bd82e36 --- /dev/null +++ b/configs/config_8.86G_dp2_tp8_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + 
consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp8_pp8_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba3b546e821c571df0bada291d18e50af36cb989 --- /dev/null +++ b/configs/config_8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp4_tp2_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + 
min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc63348b7f0f7c09ac5c43a7ea0ac4f935fa0641 --- /dev/null +++ b/configs/config_8.86G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp8_tp16_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp8_tp2_pp1_acc16.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp8_tp2_pp1_acc16.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed5b6848e193649a78b55030a3c2d271ab64f99f --- /dev/null +++ 
b/configs/config_8.86G_dp8_tp2_pp1_acc16.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp8_tp2_pp1_acc16.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5966aa07b7a0f9e5424d929b904936c97e9a4fd2 --- /dev/null +++ b/configs/config_80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp1_tp16_pp2_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true +
max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp1_tp16_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp1_tp16_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef7afd272b7545f80d3a6dd00d5ba07ed2aaf2c5 --- /dev/null +++ b/configs/config_80G_dp1_tp16_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp1_tp16_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + 
limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp4_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp4_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d7e1e6b82acc364803f1483610614eb5a6ffb1a --- /dev/null +++ b/configs/config_80G_dp2_tp4_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp4_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp8_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp8_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..530b462b3da8d1e642853c6577345c723f6115d2 --- /dev/null +++ b/configs/config_80G_dp2_tp8_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp8_pp16_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 
+ step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86d0dac2810693b0d750b449a82c42e500a19226 --- /dev/null +++ b/configs/config_80G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp4_tp2_pp1_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 
+ zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp4_tp4_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp4_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ac8a297fe8a851fa5ca96b1513c547dddd4374db --- /dev/null +++ b/configs/config_80G_dp4_tp4_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp4_tp4_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp64_tp2_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp64_tp2_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4af1636b2a1d39d09a8998154f729c6fd9de462 --- /dev/null +++ b/configs/config_80G_dp64_tp2_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + 
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp64_tp2_pp2_acc2_mbs2_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_80G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3e7cac3743010ef043df02bb2139673204f9d828
--- /dev/null
+++ b/configs/config_80G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 8
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_dp64_tp8_pp1_acc1_mbs8_seq2048.yaml b/configs/config_dp64_tp8_pp1_acc1_mbs8_seq2048.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5e1fb55c14d3f2e4478d401dd31d315c94f577cd
--- /dev/null
+++ b/configs/config_dp64_tp8_pp1_acc1_mbs8_seq2048.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: dp64_tp8_pp1_acc1_mbs8_seq2048
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 24
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 49152
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
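--
Note for reviewers: each run name encodes the parallelism layout (dp/tp/pp), gradient-accumulation steps (acc), micro-batch size (mbs), and sequence length (seq), so a config's scale can be sanity-checked against its body without reading every key: total GPUs = dp * tp * pp, and global batch size in samples = dp * batch_accumulation_per_replica * micro_batch_size. Below is a minimal sketch of that check, assuming the nanotron-style schema used in these files; the summarize helper is hypothetical and not part of this patch (expert_parallel_size is 1 everywhere here, so it is ignored).

import yaml  # requires PyYAML

def summarize(path: str) -> None:
    """Print derived scale numbers for one benchmark config (hypothetical helper)."""
    with open(path) as f:
        cfg = yaml.safe_load(f)
    par, tok = cfg["parallelism"], cfg["tokens"]
    # Total GPUs = data-parallel x tensor-parallel x pipeline-parallel ranks.
    world_size = par["dp"] * par["tp"] * par["pp"]
    # Global batch (samples) = dp replicas x accumulation steps x micro-batch size.
    gbs = par["dp"] * tok["batch_accumulation_per_replica"] * tok["micro_batch_size"]
    tokens_per_step = gbs * tok["sequence_length"]
    print(f"{path}: {world_size} GPUs, GBS={gbs} samples, {tokens_per_step:,} tokens/step")

summarize("configs/config_80G_dp8_tp8_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml")
# Expected: 512 GPUs, GBS=256 samples, 1,048,576 tokens/step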