diff --git a/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a7b5c7d7b1b5bd02dec1313b2a44b96b3688278
--- /dev/null
+++ b/configs/config_1.14G_dp128_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp128_tp2_pp1_acc1_mbs16_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp128_tp4_pp1_acc16_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp128_tp4_pp1_acc16_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff68874e2ab3b94f881b6cd5dc7f2fface9d7fdd
--- /dev/null
+++ b/configs/config_1.14G_dp128_tp4_pp1_acc16_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp128_tp4_pp1_acc16_mbs1_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 16
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp128_tp4_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp128_tp4_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ae9e3f0c3736ec2ada0943b62a5ce61d80d610cb
--- /dev/null
+++ b/configs/config_1.14G_dp128_tp4_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp128_tp4_pp1_acc1_mbs4_seq8192_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp16_tp1_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp16_tp1_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d8baa115114050d793c7b17b17119ee8ed23ce2c
--- /dev/null
+++ b/configs/config_1.14G_dp16_tp1_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp1_pp1_acc2_mbs4_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c2e7c05f68f5ba365fece694672d782fcf9f15f
--- /dev/null
+++ b/configs/config_1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp16_tp32_pp1_acc8_mbs1_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp128_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp2_tp128_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b29958c9a4239640941a9e322747948ac71eef64
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp128_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp128_pp1_acc4_mbs4_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 128
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp1_pp8_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp1_pp8_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b14e42eac86a369bd1f512ca78582a86163bd8e4
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp1_pp8_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp1_pp8_acc32_mbs32_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 8
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp2_tp32_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp2_tp32_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ebd8eaf2a0d546ab07ab8688e82ff9ec1c521e44
--- /dev/null
+++ b/configs/config_1.14G_dp2_tp32_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp2_tp32_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..635adca3d9452759b2c00cc6ac910675c3da2017
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp128_pp1_acc2_mbs16_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 128
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..554f12e1da9613592017f4ba84ff7f02f744460b
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..475f6c8fd7448aee89af5b4bc9d1ae99ceac68a9
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp2_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp2_pp1_acc32_mbs16_seq2048_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e7750e7baa0bf77439e3c2ea3f55a000ef80134b
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp2_pp1_acc4_mbs32_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp4_tp32_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp4_tp32_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2b0eceb455ca49c46570e0fe3b6214125db7f66b
--- /dev/null
+++ b/configs/config_1.14G_dp4_tp32_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp4_tp32_pp1_acc8_mbs16_seq8192_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 32
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp64_tp1_pp2_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp64_tp1_pp2_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a478687f47def7a5cc9b9de23f9bdc1435c8983
--- /dev/null
+++ b/configs/config_1.14G_dp64_tp1_pp2_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp64_tp1_pp2_acc1_mbs32_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 32
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp64_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml b/configs/config_1.14G_dp64_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2dafc4506ed02412b8b3d514585d5ea03d1d7770
--- /dev/null
+++ b/configs/config_1.14G_dp64_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp64_tp4_pp1_acc2_mbs1_seq32768_zero1_tpmodeALL_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 64
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 32768
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.14G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml b/configs/config_1.14G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f96e06581b920b26d04b0253cc44fc818f0bf77e
--- /dev/null
+++ b/configs/config_1.14G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab32k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.14G_dp8_tp4_pp1_acc4_mbs16_seq2048_zero1_tpmodeRED_vocab32k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 32768
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 16
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp128_tp4_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp128_tp4_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5a247bf4fc235f73024288af04518e861d35282
--- /dev/null
+++ b/configs/config_1.34G_dp128_tp4_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp128_tp4_pp1_acc4_mbs4_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 128
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b268c7c168cb50f5165d247d01270a5e4577f967
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp1_pp2_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp1_pp2_acc4_mbs2_seq8192_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp16_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp16_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34030f3bf34b1b512f05f4779891f0b183c3b081
--- /dev/null
+++ b/configs/config_1.34G_dp16_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp16_tp2_pp1_acc128_mbs1_seq2048_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 16
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp1_tp1_pp32_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp1_tp1_pp32_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..13487d175c0fa9703e29031c43581a6714b1f40e
--- /dev/null
+++ b/configs/config_1.34G_dp1_tp1_pp32_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp1_tp1_pp32_acc4_mbs64_seq4096_zero0_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 32
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e7d9ad761b14b5660a4ee8c35c1ef2e32daf7de2
--- /dev/null
+++ b/configs/config_1.34G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp1_tp4_pp8_acc32_mbs8_seq4096_zero0_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 0
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 8
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 32
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 8
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a20647f2958b4f7ce559825b8c627dfeb98ce54
--- /dev/null
+++ b/configs/config_1.34G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp2_tp16_pp2_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 16
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 2
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp2_tp1_pp4_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp1_pp4_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6db8a404d4a5aabafbc0d1266ff17c8f72e20ca7
--- /dev/null
+++ b/configs/config_1.34G_dp2_tp1_pp4_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp2_tp1_pp4_acc2_mbs128_seq2048_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 2048
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 2
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 128
+  sequence_length: 2048
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp2_tp1_pp4_acc64_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp1_pp4_acc64_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c7cfb86168fac395f0384b658fb87fb76445a11
--- /dev/null
+++ b/configs/config_1.34G_dp2_tp1_pp4_acc64_mbs1_seq8192_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: bench_seqlen.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp2_tp1_pp4_acc64_mbs1_seq8192_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7c9da878fd7834feb00aac7b15b44d57178b725c
--- /dev/null
+++ b/configs/config_1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp2_tp256_pp1_acc4_mbs64_seq8192_zero1_tpmodeALL_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 8192
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 256
+  tp_linear_async_communication: false
+  tp_mode: ALL_REDUCE
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 4
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 64
+  sequence_length: 8192
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..48fdd46237a4592ce83f19af5ceab4043782e59a
--- /dev/null
+++ b/configs/config_1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp2_tp2_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 2
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 128
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
diff --git a/configs/config_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49e8e9b4e37fb4c3035c5e25e42cc695f216f1c8
--- /dev/null
+++ b/configs/config_1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 1.34G_dp2_tp64_pp1_acc1_mbs64_seq32768_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 8192
+    is_llama_config: true
+    max_position_embeddings: 32768
+    num_attention_heads: 32
+    num_hidden_layers: 16
+    num_key_value_heads: 8
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+    weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 2
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 64
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 1
+  limit_test_batches: 0
limit_val_batches: 0 + micro_batch_size: 64 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp64_pp1_acc8_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp64_pp1_acc8_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c4583a330abd2c8c1c0dcdab69e4862fa5a3143 --- /dev/null +++ b/configs/config_1.34G_dp2_tp64_pp1_acc8_mbs128_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp2_tp64_pp1_acc8_mbs128_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp2_tp8_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp2_tp8_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c71fe08b53116c4aed25a0fdf50298e2c050dd5 --- /dev/null +++ b/configs/config_1.34G_dp2_tp8_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 
1.34G_dp2_tp8_pp1_acc16_mbs1_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8075865232ab400befaf1201e6a8ed3210cb553c --- /dev/null +++ b/configs/config_1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp128_pp1_acc8_mbs1_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 
+ adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 128 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c39e41becefe56806a6e33cad5c4e29c7ab4c06 --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc32_mbs1_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp16_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp16_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81f3da833b2b7ef6ae6104ab835e889cbf39155c --- /dev/null +++ b/configs/config_1.34G_dp4_tp16_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + 
checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp16_pp1_acc8_mbs4_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4bb3c654da39610cbb77266cc3ebf783af3172e6 --- /dev/null +++ b/configs/config_1.34G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp2_pp1_acc8_mbs16_seq2048_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 
1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp64_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp64_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb5795a1b6efc703d5fe6ff3872274c84fb0532f --- /dev/null +++ b/configs/config_1.34G_dp4_tp64_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp64_pp1_acc2_mbs16_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_1.34G_dp4_tp8_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp4_tp8_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d3a36f413bcf5b8daf21982fb7bb3f9b0d1e1ecd --- /dev/null +++ b/configs/config_1.34G_dp4_tp8_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp8_pp1_acc256_mbs2_seq2048_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d89c4a6e897033248992f073c2ce64d411d82980 --- /dev/null +++ b/configs/config_1.34G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp4_tp8_pp2_acc16_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info 
+model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a6d39c9646662e78061be9d43ae709b653703500 --- /dev/null +++ b/configs/config_1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp16_pp1_acc32_mbs2_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + 
tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 32 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp2_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp2_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bca4853727ede5c9759f0aa08a60c4264bb21468 --- /dev/null +++ b/configs/config_1.34G_dp8_tp2_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp2_pp1_acc4_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp32_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp32_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bdb538203b0f12fcaca49387b3be9dec20c56472 --- /dev/null +++ b/configs/config_1.34G_dp8_tp32_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1
+ seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp32_pp1_acc4_mbs16_seq8192_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp4_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp4_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f120b90b41391f78523398bfd9163a30cda58042 --- /dev/null +++ b/configs/config_1.34G_dp8_tp4_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp4_pp1_acc2_mbs8_seq32768_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 
0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp64_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml b/configs/config_1.34G_dp8_tp64_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b0cf2d7ae9e551209a93e78b1f915115e0920877 --- /dev/null +++ b/configs/config_1.34G_dp8_tp64_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp64_pp1_acc1_mbs16_seq32768_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 32768 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 32768 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_1.34G_dp8_tp64_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml b/configs/config_1.34G_dp8_tp64_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..2967de3af3d3a42365f4ba68f7067c59a22654c2 --- /dev/null +++ b/configs/config_1.34G_dp8_tp64_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 1.34G_dp8_tp64_pp1_acc8_mbs2_seq8192_zero1_tpmodeALL_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 2048 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 8192 + num_attention_heads: 32 + num_hidden_layers: 16 + num_key_value_heads: 8 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 64 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 8192 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_235M_dp512_tp1_pp1_acc1_mbs2_seq4096_zero0_l12_h1024_heads16.yaml b/configs/config_235M_dp512_tp1_pp1_acc1_mbs2_seq4096_zero0_l12_h1024_heads16.yaml new file mode 100644 index 0000000000000000000000000000000000000000..58e38953266051473ad583adb2537ad5278ed182 --- /dev/null +++ b/configs/config_235M_dp512_tp1_pp1_acc1_mbs2_seq4096_zero0_l12_h1024_heads16.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 235M_dp512_tp1_pp1_acc1_mbs2_seq4096_zero0_l12_h1024_heads16 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 1024 + initializer_range: 0.02 + 
intermediate_size: 4096 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 16 + num_hidden_layers: 12 + num_key_value_heads: 16 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 512 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp1_tp2_pp64_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp1_tp2_pp64_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..442754d9573e1be401c0c9cd89037ecac7ad936d --- /dev/null +++ b/configs/config_3.57G_dp1_tp2_pp64_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp1_tp2_pp64_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 64 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + 
batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp1_tp32_pp2_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp1_tp32_pp2_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb247783950636956c1a7ffd6cc31dfc75f3b265 --- /dev/null +++ b/configs/config_3.57G_dp1_tp32_pp2_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp1_tp32_pp2_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 32 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp1_tp8_pp1_acc1_mbs12_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml b/configs/config_3.57G_dp1_tp8_pp1_acc1_mbs12_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86d02d60bc629ebfe74f72523bdf14351b55f945 --- /dev/null +++ b/configs/config_3.57G_dp1_tp8_pp1_acc1_mbs12_seq4096_zero0_tpmodeALL_vocab131k_cache.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + 
project: debug + run: 3.57G_dp1_tp8_pp1_acc1_mbs12_seq4096_zero0_tpmodeALL_vocab131k_cache + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: false + tp_mode: ALL_REDUCE +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 12 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp1_tp8_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp1_tp8_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..246ea01bf42bbb53023af529abb5c24e8449bfde --- /dev/null +++ b/configs/config_3.57G_dp1_tp8_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp1_tp8_pp2_acc2_mbs128_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 
0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 128 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7dff83c560c9be462f81d82931927d3ebc710f3a --- /dev/null +++ b/configs/config_3.57G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp4_tp4_pp1_acc2_mbs32_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 2 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c0922e0f07778b585939536cdca2e1fe847055ae --- /dev/null +++ b/configs/config_3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + 
checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 3.57G_dp64_tp4_pp2_acc4_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 3072 + initializer_range: 0.02 + intermediate_size: 8192 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 28 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: true + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 64 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_37.8G_dp1_tp8_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml b/configs/config_37.8G_dp1_tp8_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml new file mode 100644 index 0000000000000000000000000000000000000000..800d595571bc91d992a7677cbc3dfc71110f72a3 --- /dev/null +++ b/configs/config_37.8G_dp1_tp8_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 37.8G_dp1_tp8_pp64_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 131072 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + 
rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 64 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_37.8G_dp8_tp8_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml b/configs/config_37.8G_dp8_tp8_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f05a48b44b4ab23dc050a6f74defacc9d3efb365 --- /dev/null +++ b/configs/config_37.8G_dp8_tp8_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: bench_seqlen.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 37.8G_dp8_tp8_pp8_acc1_mbs1_seq2048_zero0_tpmodeRED_l80_h8192_heads64 + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 131072 + is_llama_config: true + max_position_embeddings: 2048 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 32768 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 2048 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_469G_dp16_tp2_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp16_tp2_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..38b3ac47456b06798a7510e799925d2df80fd29d --- /dev/null +++ b/configs/config_469G_dp16_tp2_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp16_tp2_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp16_tp8_pp4_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp16_tp8_pp4_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6959cb807fc3e68b97e518e2a98fb3dba781341d --- /dev/null +++ b/configs/config_469G_dp16_tp8_pp4_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp16_tp8_pp4_acc16_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + 
log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp1_tp8_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp1_tp8_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27bea98d7f9c1205918a0ba866ee4291f73641b8 --- /dev/null +++ b/configs/config_469G_dp1_tp8_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp1_tp8_pp16_acc256_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 16 + 
pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 256 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp256_pp1_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp256_pp1_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb8a1a71f8320258a449a2540ca0989988d316a9 --- /dev/null +++ b/configs/config_469G_dp2_tp256_pp1_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp256_pp1_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 256 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp2_tp4_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp2_tp4_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6275791d896aaf84ca1e40947acf729c92dfacb9 --- /dev/null +++ b/configs/config_469G_dp2_tp4_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + 
dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp2_tp4_pp2_acc128_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp32_tp1_pp2_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp32_tp1_pp2_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81ab5a46f1f8426e63d4fd0e73c71aed3f377752 --- /dev/null +++ b/configs/config_469G_dp32_tp1_pp2_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp32_tp1_pp2_acc1_mbs8_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 
1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp32_tp8_pp1_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp32_tp8_pp1_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd18205b2ff88f5a3693f427a18b305ac982d809 --- /dev/null +++ b/configs/config_469G_dp32_tp8_pp1_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp32_tp8_pp1_acc4_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 32 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_469G_dp8_tp1_pp16_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_469G_dp8_tp1_pp16_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..0cf4c26f71517f8cfc99fb6c33eff6f1acead665 --- /dev/null +++ b/configs/config_469G_dp8_tp1_pp16_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 469G_dp8_tp1_pp16_acc16_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 16384 + initializer_range: 0.02 + intermediate_size: 53248 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 128 + num_hidden_layers: 126 + num_key_value_heads: 128 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 8 + expert_parallel_size: 1 + pp: 16 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp16_tp16_pp1_acc1_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp16_tp16_pp1_acc1_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0841ed933651f7e5c71eec6669057dece6a28661 --- /dev/null +++ b/configs/config_8.86G_dp16_tp16_pp1_acc1_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp16_tp16_pp1_acc1_mbs16_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu 
+ hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp1_tp2_pp32_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp1_tp2_pp32_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cfe886e1990aa224040a4f8475665aaa84193c51 --- /dev/null +++ b/configs/config_8.86G_dp1_tp2_pp32_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp1_tp2_pp32_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 32 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel 
+ tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp1_tp4_pp8_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp1_tp4_pp8_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8455964dd5d698c23bd0f9ee381f92fae9a9b320 --- /dev/null +++ b/configs/config_8.86G_dp1_tp4_pp8_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp1_tp4_pp8_acc8_mbs32_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 32 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp1_tp8_pp1_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp1_tp8_pp1_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f40370a4c779f7a422ba2da3b37ca080cf419373 --- /dev/null +++ b/configs/config_8.86G_dp1_tp8_pp1_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + 
ignore_sanity_checks: true + project: debug + run: 8.86G_dp1_tp8_pp1_acc1_mbs256_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 1 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 256 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dee035b772c0d9fef8a7060176935027215d6c70 --- /dev/null +++ b/configs/config_8.86G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp1_pp4_acc128_mbs1_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + 
optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 1 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 128 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp2_tp2_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp2_tp2_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e9305fdca7d498a455f98f9bebcf4b52b085709 --- /dev/null +++ b/configs/config_8.86G_dp2_tp2_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp2_tp2_pp2_acc8_mbs16_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 2 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_8.86G_dp4_tp2_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_8.86G_dp4_tp2_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..274e23a9b0ec14e3b82298921f605b5ec0b69765 --- /dev/null +++ b/configs/config_8.86G_dp4_tp2_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ 
+checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 8.86G_dp4_tp2_pp8_acc8_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 4096 + initializer_range: 0.02 + intermediate_size: 14336 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 32 + num_hidden_layers: 32 + num_key_value_heads: 32 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 4 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 2 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6abf1f4da3288f2b53f0e264c4ea317616bc4339 --- /dev/null +++ b/configs/config_80G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp16_tp16_pp1_acc4_mbs4_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: 
null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 16 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 4 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 4 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp16_tp4_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp16_tp4_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0de7cd9d7f2f9acea305e921438fe1d210217a10 --- /dev/null +++ b/configs/config_80G_dp16_tp4_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp16_tp4_pp1_acc16_mbs1_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 1 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 1 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git 
a/configs/config_80G_dp16_tp8_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp16_tp8_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26199f2f646a61a958cfa82d28676547b7bd434b --- /dev/null +++ b/configs/config_80G_dp16_tp8_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp16_tp8_pp4_acc8_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 16 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 8 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de0180c72857e488c4fe35a7a128681d61cdf18a --- /dev/null +++ b/configs/config_80G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp1_tp4_pp8_acc16_mbs16_seq4096_zero0_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + 
ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 0 +parallelism: + dp: 1 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 4 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 16 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp32_pp8_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp32_pp8_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..11afe036c5423f751128a0003b0e3e21ca3bce36 --- /dev/null +++ b/configs/config_80G_dp2_tp32_pp8_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp32_pp8_acc64_mbs2_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 8 + pp_engine: 1f1b + tp: 32 + 
tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 64 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 2 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp2_tp8_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp2_tp8_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a91eec7ec9531ec513c664cb4ea0efc01d92b580 --- /dev/null +++ b/configs/config_80G_dp2_tp8_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + name: Stable Training Stage + start_training_step: 1 +general: + benchmark_csv_path: benchmark/results/bench_final2.csv + consumed_train_samples: null + ignore_sanity_checks: true + project: debug + run: 80G_dp2_tp8_pp4_acc16_mbs8_seq4096_zero1_tpmodeRED_vocab131k + seed: 42 + step: null +lighteval: null +logging: + iteration_step_info_interval: 1 + log_level: info + log_level_replica: info +model: + ddp_bucket_cap_mb: 25 + dtype: bfloat16 + init_method: + std: 0.02 + make_vocab_size_divisible_by: 1 + model_config: + bos_token_id: 0 + eos_token_id: 0 + hidden_act: silu + hidden_size: 8192 + initializer_range: 0.02 + intermediate_size: 28672 + is_llama_config: true + max_position_embeddings: 4096 + num_attention_heads: 64 + num_hidden_layers: 80 + num_key_value_heads: 64 + pad_token_id: null + pretraining_tp: 1 + rms_norm_eps: 1.0e-05 + rope_scaling: null + tie_word_embeddings: false + use_cache: true + vocab_size: 131072 +optimizer: + accumulate_grad_in_fp32: true + clip_grad: 1.0 + learning_rate_scheduler: + learning_rate: 0.0003 + lr_decay_starting_step: null + lr_decay_steps: 13 + lr_decay_style: cosine + lr_warmup_steps: 2 + lr_warmup_style: linear + min_decay_lr: 1.0e-05 + optimizer_factory: + adam_beta1: 0.9 + adam_beta2: 0.95 + adam_eps: 1.0e-08 + name: adamW + torch_adam_is_fused: true + weight_decay: 0.01 + zero_stage: 1 +parallelism: + dp: 2 + expert_parallel_size: 1 + pp: 4 + pp_engine: 1f1b + tp: 8 + tp_linear_async_communication: true + tp_mode: REDUCE_SCATTER +profiler: null +tokenizer: + tokenizer_max_length: null + tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel + tokenizer_revision: null +tokens: + batch_accumulation_per_replica: 16 + limit_test_batches: 0 + limit_val_batches: 0 + micro_batch_size: 8 + sequence_length: 4096 + train_steps: 100 + val_check_interval: 100 diff --git a/configs/config_80G_dp32_tp8_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp32_tp8_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f1dee1aa8d1be13a1d4c61be70557c64463d357 --- /dev/null +++ b/configs/config_80G_dp32_tp8_pp2_acc2_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml @@ -0,0 +1,91 @@ +checkpoints: + checkpoint_interval: 10000 + checkpoints_path: checkpoints + checkpoints_path_is_shared_file_system: false + resume_checkpoint_path: null + save_initial_state: false +data_stages: +- data: + dataset: null + num_loading_workers: 1 + seed: 42 + 
diff --git a/configs/config_80G_dp4_tp1_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp4_tp1_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..943ded7ed5cef0767fbb27518c0223955885fbdb
--- /dev/null
+++ b/configs/config_80G_dp4_tp1_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp4_tp1_pp2_acc64_mbs1_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 4
+  expert_parallel_size: 1
+  pp: 2
+  pp_engine: 1f1b
+  tp: 1
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 64
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 1
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
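The model_config shared by these files pins down the parameter count behind the "80G" label. A back-of-envelope estimate, assuming bias-free Llama-style blocks (q/k/v/o projections plus a SwiGLU MLP) and ignoring the small RMSNorm weights:

# Sketch: sanity-check the "80G" size tag from the model_config fields.
h, layers, inter, vocab = 8192, 80, 28672, 131072

embeddings = 2 * vocab * h   # input + output matrices (tie_word_embeddings: false)
attn = 4 * h * h             # num_key_value_heads == num_attention_heads, so full MHA
mlp = 3 * h * inter          # gate, up, and down projections
params = layers * (attn + mlp) + embeddings

print(f"{params / 1e9:.1f}B parameters")  # ~80.0B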
diff --git a/configs/config_80G_dp8_tp4_pp4_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml b/configs/config_80G_dp8_tp4_pp4_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6ede345475adeab019c226529d4f8eb5727b14c5
--- /dev/null
+++ b/configs/config_80G_dp8_tp4_pp4_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml
@@ -0,0 +1,91 @@
+checkpoints:
+  checkpoint_interval: 10000
+  checkpoints_path: checkpoints
+  checkpoints_path_is_shared_file_system: false
+  resume_checkpoint_path: null
+  save_initial_state: false
+data_stages:
+- data:
+    dataset: null
+    num_loading_workers: 1
+    seed: 42
+  name: Stable Training Stage
+  start_training_step: 1
+general:
+  benchmark_csv_path: benchmark/results/bench_final2.csv
+  consumed_train_samples: null
+  ignore_sanity_checks: true
+  project: debug
+  run: 80G_dp8_tp4_pp4_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k
+  seed: 42
+  step: null
+lighteval: null
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.02
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 0
+    eos_token_id: 0
+    hidden_act: silu
+    hidden_size: 8192
+    initializer_range: 0.02
+    intermediate_size: 28672
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 64
+    num_hidden_layers: 80
+    num_key_value_heads: 64
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    tie_word_embeddings: false
+    use_cache: true
+    vocab_size: 131072
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0003
+    lr_decay_starting_step: null
+    lr_decay_steps: 13
+    lr_decay_style: cosine
+    lr_warmup_steps: 2
+    lr_warmup_style: linear
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 8
+  expert_parallel_size: 1
+  pp: 4
+  pp_engine: 1f1b
+  tp: 4
+  tp_linear_async_communication: true
+  tp_mode: REDUCE_SCATTER
+profiler: null
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
+  tokenizer_revision: null
+tokens:
+  batch_accumulation_per_replica: 8
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 4096
+  train_steps: 100
+  val_check_interval: 100
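To place one of these configs on hardware, divide its implied world size by the GPUs per node. A sketch for the dp8_tp4_pp4 config above, assuming 8-GPU nodes and nanotron's run_train.py entry point; the exact launch CLI is an assumption, not something this diff specifies:

# Sketch: node count and a plausible launch command for the last config.
world_size = 8 * 4 * 4                 # dp * tp * pp
gpus_per_node = 8                      # assumed node size
nnodes = world_size // gpus_per_node   # 16

print(
    f"torchrun --nnodes={nnodes} --nproc_per_node={gpus_per_node} "
    "run_train.py --config-file "
    "configs/config_80G_dp8_tp4_pp4_acc8_mbs4_seq4096_zero1_tpmodeRED_vocab131k.yaml"
)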