diff --git a/configs/config_1.14G_dp16_tp1_pp2_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp16_tp1_pp2_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1f843be346246d2ca41c61675ba3d9b1f1b1215 --- /dev/null +++ b/configs/config_1.14G_dp16_tp1_pp2_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp16_tp1_pp2_acc2_mbs16_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e059cb1dfdc5d778cc45099753139318308f46a5 --- /dev/null +++ b/configs/config_1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp16_tp2_pp1_acc1_mbs32_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + 
log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..997f754fcd64b7d8cadba1ed4cfef693fd71a1a8 --- /dev/null +++ b/configs/config_1.14G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp32_pp1_acc16_mbs16_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + 
tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..09c3cae8190cae36d6fff5d79b835af435bb42f8 --- /dev/null +++ b/configs/config_1.14G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp32_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp4_pp1_acc4_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp4_pp1_acc4_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25090db8802dfe7c4912671d5812357448c0c779 --- /dev/null +++ b/configs/config_1.14G_dp2_tp4_pp1_acc4_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + 
seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp4_pp1_acc4_mbs4_seq32768_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e94ff4334e4559cf6965e2cdef37fbc9b4d3eeb --- /dev/null +++ b/configs/config_1.14G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp2_tp8_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + 
lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..529e4bf209028bbe7dcdac60126ac5255ac39252 --- /dev/null +++ b/configs/config_1.14G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp32_tp16_pp1_acc2_mbs32_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp16_pp1_acc4_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp16_pp1_acc4_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..cec40b0b895390f8695fab399e65349b5f3cffab --- /dev/null +++ b/configs/config_1.14G_dp32_tp16_pp1_acc4_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp32_tp16_pp1_acc4_mbs1_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp1_pp2_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp1_pp2_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..84a54f9bb830d12c5cd9a1326d9c2c36b03a8ffd --- /dev/null +++ b/configs/config_1.14G_dp32_tp1_pp2_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp32_tp1_pp2_acc16_mbs1_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 
2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b451d4872c8116b25b3f2f617295e81a8141efa --- /dev/null +++ b/configs/config_1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp32_tp1_pp2_acc1_mbs1_seq32768_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null 
+tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp2_pp1_acc16_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp32_tp2_pp1_acc16_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..212d10561c6eaaf75e409de988501a6c908e5e31 --- /dev/null +++ b/configs/config_1.14G_dp32_tp2_pp1_acc16_mbs4_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp32_tp2_pp1_acc16_mbs4_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp32_tp4_pp1_acc2_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp32_tp4_pp1_acc2_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f807c24ade4e5c55f33b83a45e11a6848e29edca --- /dev/null +++ b/configs/config_1.14G_dp32_tp4_pp1_acc2_mbs32_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 
1.14G_dp32_tp4_pp1_acc2_mbs32_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp1_pp2_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp1_pp2_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9be2ac4d5fba5ec148a6a27c81e43466f64543e0 --- /dev/null +++ b/configs/config_1.14G_dp4_tp1_pp2_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp1_pp2_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 
1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..76d4903af27178345529404f8d0899d2c423a1ab --- /dev/null +++ b/configs/config_1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp2_pp1_acc1_mbs128_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21756394fd5b456c81139cde6122a9c3e33abb1a --- /dev/null +++ b/configs/config_1.14G_dp4_tp2_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + 
checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp2_pp1_acc4_mbs128_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f89f417552072e2ef57623803bd4a37faf0160b7 --- /dev/null +++ b/configs/config_1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp32_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + 
tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c9d571a924959282b23ff12a5bccf1669e95615 --- /dev/null +++ b/configs/config_1.14G_dp4_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp4_pp1_acc16_mbs2_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp4_pp1_acc1_mbs32_seq32768_zero1_tpmodeALL_vocab32k.yaml 
b/configs/config_1.14G_dp4_tp4_pp1_acc1_mbs32_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd16091658575a5df95661ec580239c33b9062b0 --- /dev/null +++ b/configs/config_1.14G_dp4_tp4_pp1_acc1_mbs32_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp4_pp1_acc1_mbs32_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp4_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp4_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fbf5e457f30eb541eef7af3fbaa87ad7410e68b2 --- /dev/null +++ b/configs/config_1.14G_dp4_tp4_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp4_pp1_acc32_mbs4_seq2048_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3291d00ffa7b4ecae1c49a3c26746fac695f78a6 --- /dev/null +++ b/configs/config_1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp4_tp8_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp16_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp8_tp16_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a0f55ab5442c0f7219c88929d6c91cfa127a3db --- /dev/null +++ b/configs/config_1.14G_dp8_tp16_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp16_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp1_pp2_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp1_pp2_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01d8ed6a2888e959c1338c73921e845686fda1d0 --- /dev/null +++ b/configs/config_1.14G_dp8_tp1_pp2_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: 
benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp1_pp2_acc32_mbs2_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp2_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp2_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94ad562f5c2481132b78bec19c5f9a14722c61e4 --- /dev/null +++ b/configs/config_1.14G_dp8_tp2_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp2_pp1_acc128_mbs2_seq2048_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + 
lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.14G_dp8_tp8_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp8_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5f5c3db2416bc70e7a0e903592a7b7242c253b36 --- /dev/null +++ b/configs/config_1.14G_dp8_tp8_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.14G_dp8_tp8_pp1_acc16_mbs4_seq8192_zero1_tpmodeRED_vocab32k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d473e83fe07ec0f39c54b9317d0ab6eb7c6e884 --- /dev/null +++ 
b/configs/config_1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp16_pp1_acc8_mbs4_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp1_pp1_acc8.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp1_acc8.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4bec874e54019aee22f6d4adb5b5afbaf0325766 --- /dev/null +++ b/configs/config_1.34G_dp16_tp1_pp1_acc8.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp1_pp1_acc8.0_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + 
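# Annotation (derivation sketch, not emitted by the config generator): the
# "1.34G" size label matches this shape when attention is full multi-head,
# i.e. num_key_value_heads == num_attention_heads == 32, as in this file:
#   tied embeddings:      131072 * 2048    ~= 0.27G
#   attention per layer:  4 * 2048 * 2048  ~= 16.8M
#   SwiGLU MLP per layer: 3 * 2048 * 8192  ~= 50.3M
#   total: 16 * 67.1M + 0.27G              ~= 1.34G
# Sibling configs with num_key_value_heads: 8 (GQA) hold about 1.24G
# parameters but carry the same label.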
max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f228f3d2ba96dd238153d08fc1bb860241f6917b --- /dev/null +++ b/configs/config_1.34G_dp16_tp2_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp2_pp1_acc32_mbs1_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + 
limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..673cdc9afbcaebbcf834ebf502ee91c0da7aaec7 --- /dev/null +++ b/configs/config_1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp2_pp1_acc4_mbs8_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c482d3c3e65c41c9517c2b45a02448de8e7121c --- /dev/null +++ b/configs/config_1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp16_tp32_pp1_acc128_mbs1_seq2048_zero1_tpmodeRED_vocab131k 
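# Annotation (reading of the run-name convention used across this sweep): the
# fields encode the topology and batch shape, from which the launch size and
# per-step token budget follow directly for this run:
#   world_size   = dp * tp * pp   = 16 * 32 * 1      = 512 ranks
#   global batch = dp * mbs * acc = 16 * 1 * 128     = 2048 sequences/step
#   tokens/step  = global batch * seq = 2048 * 2048  = 4,194,304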
+ seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp256_tp2_pp1_acc4_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp256_tp2_pp1_acc4_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af9ae0470ab70ea4c49066d8bd7df0df157afb10 --- /dev/null +++ b/configs/config_1.34G_dp256_tp2_pp1_acc4_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp256_tp2_pp1_acc4_mbs2_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + 
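# Annotation (rough memory estimate, assuming the standard 12 bytes/param of
# fp32 AdamW state: master weights, momentum, variance): the zero_stage: 1
# setting below shards these states over the dp=256 replicas, so each rank
# holds roughly 1.34e9 * 12 / 256 ~= 63 MB of optimizer state rather than
# ~16 GB unsharded; bf16 parameters and gradients stay replicated at stage 1.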
weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 256 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ff4cac0e378c4a2773150a0a5034159253398a57 --- /dev/null +++ b/configs/config_1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp16_pp1_acc128_mbs2_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6040b60c5cc0fe9f0ca63b075bf4d74951932acd --- /dev/null +++ b/configs/config_1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false 
+ resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp16_pp1_acc4_mbs16_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3724954e55234a47ac31b46ad700d34d5dc2640a --- /dev/null +++ b/configs/config_1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp256_pp1_acc256_mbs1_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: 
true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp32_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp32_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b456064ca390e1687f03bb39d3d5f8506a9e91c --- /dev/null +++ b/configs/config_1.34G_dp2_tp32_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp32_pp1_acc64_mbs4_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml 
b/configs/config_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f17d9427412ea1981efb9d35bc21e1b5b500b845 --- /dev/null +++ b/configs/config_1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp8_pp1_acc32_mbs2_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp1_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp1_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73423795aa33d1c960cd6836197b50c65f40cef5 --- /dev/null +++ b/configs/config_1.34G_dp32_tp1_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp1_pp1_acc2_mbs8_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37a22eddae0746a16d84d78585d2216486308283 --- /dev/null +++ b/configs/config_1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp32_tp8_pp1_acc1_mbs4_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8067f59ff1faa94a934de911985c5584d203283f --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e93001c087696cc0a5f8fab5fe8cb0547049250b --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + 
benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc4_mbs128_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0f75190590eecb7f241500ecd38b828ff511620 --- /dev/null +++ b/configs/config_1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: 
cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15f8588195b44dc4c4e4a63bc6d24fcaa3619bb0 --- /dev/null +++ b/configs/config_1.34G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp32_pp1_acc2_mbs64_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df588b819d0482934ce62f3fa416ddb367925304 --- /dev/null +++ 
b/configs/config_1.34G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba1fd5fac97ec926ff05e236e6193e365f14d970 --- /dev/null +++ b/configs/config_1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp2_pp1_acc8_mbs4_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + 
max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..44b62a620db3c27d1925e295767925596abe2d0d --- /dev/null +++ b/configs/config_1.34G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + 
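# Annotation (worked numbers for this tokens block): dp * mbs * acc =
# 64 * 4 * 8 = 2048 sequences per step, i.e. 2048 * 2048 ~= 4.2M tokens/step,
# so the whole train_steps: 100 run consumes only ~0.42B tokens; these are
# throughput benchmarks, not convergence runs.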
limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp16_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp16_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c78b58dd4ae5f28e8035688c805f350704b4dd78 --- /dev/null +++ b/configs/config_1.34G_dp8_tp16_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp16_pp2_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d224be4f15d039e796539688c1b729613c620831 --- /dev/null +++ b/configs/config_1.34G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp2_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k + 
seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..34d38eb71be95625f77b600edf5d690265d148c3 --- /dev/null +++ b/configs/config_1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp32_pp1_acc2_mbs2_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + 
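# Annotation (behavioral note on the TP flags below, as these modes are
# commonly implemented): ALL_REDUCE keeps activations replicated across the
# tp=32 ranks and all-reduces after each row-parallel linear, while
# REDUCE_SCATTER keeps activations sharded and lets the linear layers overlap
# communication with compute, which is why tp_linear_async_communication is
# true only in the REDUCE_SCATTER variants of this sweep.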
weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp32_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp32_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c97745700a9acad4ba2e85e046225014025fc328 --- /dev/null +++ b/configs/config_1.34G_dp8_tp32_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp32_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db0d81905065efb1930d4381cf88c6b646ce12a5 --- /dev/null +++ b/configs/config_1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + 
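# Annotation: with checkpoint_interval: 10000 against train_steps: 100, and
# save_initial_state: false, these runs never actually write a checkpoint,
# consistent with their role as short benchmark runs.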
resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp8_pp1_acc8_mbs8_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.yaml b/configs/config_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5bcb29f8f7605ce1ba94e48a272d209434cd005 --- /dev/null +++ b/configs/config_2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 2.28G_dp1_tp8_pp1_acc1_mbs1_seq2048_zero0_tpmodeRED_l26_h2304_heads16 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2304 + initializer_range: 0.02 + intermediate_size: 9216 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 16 + num_hidden_layers: 26 + num_key_value_heads: 16 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + 
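+ # note (annotation): head_dim = hidden_size / num_attention_heads = 2304 / 16 = 144; num_key_value_heads
+ # equals num_attention_heads here, i.e. full multi-head attention rather than grouped-query attention.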
vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp2_tp8_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp2_tp8_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01f1f1776264f1679d7f5ea06b285d658e4424e3 --- /dev/null +++ b/configs/config_3.57G_dp2_tp8_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp2_tp8_pp8_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml 
b/configs/config_3.57G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba61669fefb137b708381fdb5a24ea26ad0161f6 --- /dev/null +++ b/configs/config_3.57G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp8_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp4_tp8_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d490f97c39a4094854e7dd4a242666256fecc31b --- /dev/null +++ b/configs/config_3.57G_dp4_tp8_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp8_pp2_acc4_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
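+ # note (annotation): dp 4 x tp 8 x pp 2 = 64 GPUs; global batch = 4 x 16 (mbs) x 4 (acc) = 256 sequences
+ # = 256 x 4096 = 1,048,576 tokens per optimizer step.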
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp8_tp16_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp8_tp16_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa6153e110fbb62779bd110befdc18928f6cd409 --- /dev/null +++ b/configs/config_3.57G_dp8_tp16_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp8_tp16_pp2_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_37.8G_dp8_tp32_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml b/configs/config_37.8G_dp8_tp32_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml new file mode 100644 index 0000000000000000000000000000000000000000..acf95b27f1130bc739cd84a81f217492ad8a8132 --- /dev/null +++ b/configs/config_37.8G_dp8_tp32_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 37.8G_dp8_tp32_pp2_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 131072 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..366dd72c144909bd49799a32188d1a894daf59b3 --- /dev/null +++ b/configs/config_469G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: 
+ benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp1_tp1_pp128_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 128 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp1_tp4_pp32_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp1_tp4_pp32_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..283518fc88c5f5fd58f7a19cc5916cae497e02d4 --- /dev/null +++ b/configs/config_469G_dp1_tp4_pp32_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp1_tp4_pp32_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 
13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp1_pp16_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp1_pp16_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..599700f7a27c69ecb9247c215d1cfa575e4a7351 --- /dev/null +++ b/configs/config_469G_dp2_tp1_pp16_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp1_pp16_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp4_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp4_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..494bd8c8eb4be274c88dc52926eba045d225b590 --- /dev/null +++ 
b/configs/config_469G_dp2_tp4_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp4_pp8_acc32_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55cbd9cc2584b94f28caaf1ddfb821d6d39a4611 --- /dev/null +++ b/configs/config_469G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp32_tp8_pp2_acc8_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + 
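+ # note (annotation): dp 32 x tp 8 x pp 2 = 512 GPUs; with num_hidden_layers = 126 and pp = 2, each
+ # pipeline stage presumably holds roughly 63 transformer layers (embedding / lm-head sit on the boundary stages).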
max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp8_tp1_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp8_tp1_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e26b726dce9578597b29c4fa7d8fec2e72bd3754 --- /dev/null +++ b/configs/config_469G_dp8_tp1_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp8_tp1_pp2_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + 
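+ # note (annotation): global batch = dp (8) x micro_batch_size (4) x batch_accumulation_per_replica (8)
+ # = 256 sequences = 256 x 4096 = 1,048,576 tokens per optimizer step.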
limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp8_tp2_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp8_tp2_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0387dd877901e08801658663c5545199a3a28146 --- /dev/null +++ b/configs/config_469G_dp8_tp2_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp8_tp2_pp1_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp8_tp8_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp8_tp8_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9785f2d1577e031e1f1005b86dc8889e866603b --- /dev/null +++ b/configs/config_469G_dp8_tp8_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp8_tp8_pp4_acc32_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 
42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_5.5G_dp4_tp32_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml b/configs/config_5.5G_dp4_tp32_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml new file mode 100644 index 0000000000000000000000000000000000000000..40f77b50d580823c745243c070547dd93c3137e9 --- /dev/null +++ b/configs/config_5.5G_dp4_tp32_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 5.5G_dp4_tp32_pp4_acc1_mbs1_seq2048_zero0_tpmodeRED_l32_h4096_heads32 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: 
true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62b4ff7424bad2fc6aae173dc60f185d9c7833ee --- /dev/null +++ b/configs/config_8.86G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp1_tp1_pp16_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..acbc8de253876f4b1ea23b933466fc0e4cf3106d --- /dev/null +++ b/configs/config_8.86G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + 
checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp2_pp2_acc4_mbs32_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..143de8103d73cb390628f81d555280aa3c5d3db0 --- /dev/null +++ b/configs/config_8.86G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp4_tp1_pp4_acc32_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + 
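+ # note (annotation): Llama-8B-like shape; 32 layers x (4 x 4096^2 attention + 3 x 4096 x 14336 gated MLP)
+ # is roughly 7.8B params, and the untied 131072-token embedding plus lm_head add about 1.07B more,
+ # which matches the 8.86G tag in the filename.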
tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp8_tp2_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp8_tp2_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8cdd1cfbc1aacaab92f198a2a72f576068eb5425 --- /dev/null +++ b/configs/config_8.86G_dp8_tp2_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp8_tp2_pp4_acc4_mbs8_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml 
b/configs/config_8.86G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8094aba260af5d9b25be8a6f506f8edce9241bc1 --- /dev/null +++ b/configs/config_8.86G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp8_tp4_pp4_acc32_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp8_tp4_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp8_tp4_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98036a729839e10cd1d9a5fd99189dca26aaafb3 --- /dev/null +++ b/configs/config_8.86G_dp8_tp4_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp8_tp4_pp8_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + 
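+ # note (annotation): dp 8 x tp 4 x pp 8 = 256 GPUs; global batch = 8 x 4 (mbs) x 8 (acc) = 256 sequences
+ # (about 1.05M tokens per step at sequence_length 4096).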
make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp16_tp1_pp8_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp16_tp1_pp8_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af650b39012f5bf7e2a24af1a9b44f8bb5f7c865 --- /dev/null +++ b/configs/config_80G_dp16_tp1_pp8_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp16_tp1_pp8_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + 
tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4264d5175f7961504b39dca5c40a751059d1e95 --- /dev/null +++ b/configs/config_80G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp16_tp8_pp2_acc1_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp4_tp16_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp16_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..222f899297a30df0114b18acd69e5f91902dd419 --- /dev/null +++ b/configs/config_80G_dp4_tp16_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: 
benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp4_tp16_pp4_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp4_tp4_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp4_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37d598166370020dea171bbb4ad40d2c126f82ef --- /dev/null +++ b/configs/config_80G_dp4_tp4_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp4_tp4_pp16_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 
2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba307e48d41157c7e0390030b2a05b124c2bac7d --- /dev/null +++ b/configs/config_80G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100
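
Note (not part of the diff above): each filename encodes the sweep point as dp/tp/pp/acc/mbs/seq/zero/tpmode/vocab, and the YAML body should agree with it. The sketch below is an illustrative consistency check, not repo tooling — it assumes only PyYAML and the config layout shown in this diff. The function name `check` is hypothetical.

    # Illustrative sketch: derive world size, global batch size, and
    # tokens per step from one sweep config and compare against its name.
    # Assumes PyYAML is installed; config layout as in the diffs above.
    import yaml

    def check(path):
        with open(path) as f:
            cfg = yaml.safe_load(f)
        par, tok = cfg["parallelism"], cfg["tokens"]
        # GPUs required for this run: data * tensor * pipeline parallel degrees
        world_size = par["dp"] * par["tp"] * par["pp"]
        # Global batch size in sequences: replicas * micro-batch * accumulation
        gbs = (par["dp"]
               * tok["micro_batch_size"]
               * tok["batch_accumulation_per_replica"])
        tokens_per_step = gbs * tok["sequence_length"]
        print(f"{path}: world_size={world_size} "
              f"global_batch={gbs} tokens/step={tokens_per_step}")

    check("configs/config_80G_dp64_tp2_pp4_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml")
    # Expected: world_size=512 global_batch=256 tokens/step=1048576

For the dp64/tp2/pp4 config this gives 64 * 2 * 4 = 512 GPUs and 64 * 1 * 4 = 256 sequences per step, i.e. 256 * 4096 = 1,048,576 tokens per optimizer step; the same arithmetic applies to every config in the sweep.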