diff --git "a/attnserver.run_attnserver.slurm.sh.343216.out.log" "b/attnserver.run_attnserver.slurm.sh.343216.out.log" new file mode 100644--- /dev/null +++ "b/attnserver.run_attnserver.slurm.sh.343216.out.log" @@ -0,0 +1,27658 @@ +Running ctx_length=1024, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 1024 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... 
False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. 
False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 1024 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 
1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 
0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 1024 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. 
False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. 
None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... 
None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 1024 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 
1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... 
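For reference, two of the figures above follow from simple arithmetic: the data-parallel size of 1 is what remains of the 32-rank world size after tensor (4), context (8), and pipeline (1) parallelism, and the padded vocab rounds 50257 up to a multiple of make_vocab_size_divisible_by times the tensor-parallel size. A minimal sketch of that arithmetic, assuming the standard Megatron padding rule:

# Sketch of the parallel-layout and padded-vocab arithmetic reported above.
# Assumes the usual Megatron rule: pad the vocab to a multiple of
# make_vocab_size_divisible_by * tensor_model_parallel_size.
world_size, tp, cp, pp = 32, 4, 8, 1
dp = world_size // (tp * cp * pp)
assert dp == 1                                    # "data-parallel size: 1"

vocab_size, divisible_by = 50257, 128
multiple = divisible_by * tp                      # 512
padded = ((vocab_size + multiple - 1) // multiple) * multiple
print(padded, padded - vocab_size)                # 50688 431 -> "431 dummy tokens"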
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.061 seconds
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.595 seconds
+time to initialize megatron (seconds): 8.687
+[after megatron is initialized] datetime: 2025-06-21 22:10:39
+building GPT model ...
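The per-rank parameter counts printed below (144247808 parameters on every (tensor, pipeline) rank) are consistent with the arguments above; a rough count, assuming the usual GPT layout with fused QKV, grouped-query attention, linear biases, learned absolute positions, and output weights tied to the embedding:

# Back-of-the-envelope parameter count per tensor-parallel rank for the
# configuration above (hidden 4096, ffn 16384, 2 layers, 16 query groups,
# kv_channels 64, padded vocab 50688, TP=4). Illustrative only.
h, ffn, layers, tp = 4096, 16384, 2, 4
vocab_padded, max_pos = 50688, 1024
groups, kv_ch = 16, 64

word_emb = vocab_padded * h // tp                 # column-parallel word embeddings
pos_emb = max_pos * h                             # learned positions, replicated
qkv = (h + 2 * groups * kv_ch) * (h + 1) // tp    # fused QKV weight + bias shard
proj = h * h // tp + h                            # row-parallel proj + full bias
fc1 = (h + 1) * ffn // tp                         # column-parallel fc1 weight + bias shard
fc2 = ffn * h // tp + h                           # row-parallel fc2 + full bias
norms = 2 * 2 * h                                 # two LayerNorms per layer (weight + bias)
per_layer = qkv + proj + fc1 + fc2 + norms

total = word_emb + pos_emb + layers * per_layer + 2 * h   # + final LayerNorm
print(total)                                      # 144247808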
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 144247808
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 144247808
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 144247808
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 144247808
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (144247808 elements, 144247808 padded size):
+    module.decoder.final_layernorm.bias
+    module.decoder.layers.1.mlp.linear_fc1.bias
+    module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+    module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+    module.decoder.layers.0.self_attention.linear_proj.bias
+    module.decoder.layers.1.self_attention.linear_qkv.weight
+    module.decoder.layers.1.self_attention.linear_proj.weight
+    module.embedding.word_embeddings.weight
+    module.decoder.final_layernorm.weight
+    module.decoder.layers.1.mlp.linear_fc2.weight
+    module.decoder.layers.1.self_attention.linear_proj.bias
+    module.decoder.layers.0.mlp.linear_fc2.weight
+    module.decoder.layers.0.self_attention.linear_qkv.weight
+    module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+    module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+    module.decoder.layers.1.self_attention.linear_qkv.bias
+    module.decoder.layers.0.mlp.linear_fc2.bias
+    module.decoder.layers.0.mlp.linear_fc1.bias
+    module.decoder.layers.0.mlp.linear_fc1.weight
+    module.decoder.layers.1.mlp.linear_fc1.weight
+    module.decoder.layers.0.self_attention.linear_qkv.bias
+    module.decoder.layers.1.mlp.linear_fc2.bias
+    module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+    module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+    module.embedding.position_embeddings.weight
+    module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+    module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+    module.decoder.layers.0.self_attention.linear_proj.weight
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None,
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.18, 3.59) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:10:40 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=1024, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.006707 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66592 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.003341 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66562 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time 
elapsed: 0.003330 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 66686 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:10:40 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (469.37, 501.51) + train/valid/test-data-iterators-setup ..........: (21.15, 169.79) +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:10:40 +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask batch tensor:torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids tokenstorch.Size([8, 8192]) +torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch 
tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask 
torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: batch tensor after cp:loss_mask torch.Size([8, 1024])tokens + batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])torch.Size([8, 1024]) + +batch tensor after cp: position_ids batch tensor after cp:torch.Size([8, 1024]) +labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp:batch tensor after cp: position_ids torch.Size([8, 1024]) +tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 
1024])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
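Every rank's printout above follows the same pattern: the per-sequence tensors (tokens, labels, loss_mask, position_ids) go from 8192 to 1024 positions, while the attention mask keeps its full 8192-wide key dimension, i.e. [8, 1, 8192, 8192] becomes [8, 1, 1024, 8192]. That is what you would expect when a context-parallel group of 8 ranks each keeps 8192/8 = 1024 query positions. The sketch below reproduces only this shape transformation; the function name and the plain contiguous split are illustrative assumptions (Megatron's actual helper uses a more elaborate, load-balanced chunking), not the project's code.

```python
# Shape-level sketch of context-parallel batch slicing, matching the shapes
# printed in this log ([8, 8192] -> [8, 1024], mask [8, 1, 8192, 8192] ->
# [8, 1, 1024, 8192]). Contiguous split and function name are assumptions.
import torch

def slice_batch_for_cp_rank(batch, cp_size, cp_rank):
    seq_len = batch["tokens"].size(1)           # 8192 in this run
    chunk = seq_len // cp_size                  # 8192 // 8 = 1024
    s, e = cp_rank * chunk, (cp_rank + 1) * chunk
    sliced = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # only the query rows are sliced; keys still span the full sequence
            sliced[name] = t[:, :, s:e, :]
        else:
            sliced[name] = t[:, s:e]
    return sliced

if __name__ == "__main__":
    b, seq = 8, 8192
    batch = {
        "tokens": torch.zeros(b, seq, dtype=torch.long),
        "labels": torch.zeros(b, seq, dtype=torch.long),
        "loss_mask": torch.ones(b, seq),
        "position_ids": torch.arange(seq).repeat(b, 1),
        "attention_mask": torch.ones(b, 1, seq, seq, dtype=torch.bool),
    }
    local = slice_batch_for_cp_rank(batch, cp_size=8, cp_rank=0)
    print(local["tokens"].shape)           # torch.Size([8, 1024])
    print(local["attention_mask"].shape)   # torch.Size([8, 1, 1024, 8192])
```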
+Start exporting trace 0
+Done exporting trace 0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3800.0 | max reserved: 3800.0
+ [2025-06-21 22:10:59] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 18740.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3958.0 | max reserved: 3958.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3798.0 | max reserved: 3798.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3824.0 | max reserved: 3824.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3904.0 | max reserved: 3904.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3832.0 | max reserved: 3832.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3896.0 | max reserved: 3896.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3896.0 | max reserved: 3896.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3968.0 | max reserved: 3968.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3864.0 | max reserved: 3864.0
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3906.0 | max reserved: 3906.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3862.0 | max reserved: 3862.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3906.0 | max reserved: 3906.0
+[Rank 25] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3922.0 | max reserved: 3922.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3922.0 | max reserved: 3922.0
+[Rank 23] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3910.0 | max reserved: 3910.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3968.0 | max reserved: 3968.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3800.0 | max reserved: 3800.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3824.0 | max reserved: 3824.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3888.0 | max reserved: 3888.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3888.0 | max reserved: 3888.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3910.0 | max reserved: 3910.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3846.0 | max reserved: 3846.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3968.0 | max reserved: 3968.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3840.0 | max reserved: 3840.0
+[Rank 29] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3846.0 | max reserved: 3846.0
+[Rank 18] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3766.0 | max reserved: 3766.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3910.0 | max reserved: 3910.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3864.0 | max reserved: 3864.0
+[Rank 30] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3862.0 | max reserved: 3862.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3910.0 | max reserved: 3910.0
+[Rank 17] (after 1 iterations) memory (MB) | allocated: 2378.62939453125 | max allocated: 3476.04736328125 | reserved: 3862.0 | max reserved: 3862.0
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
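A quick check on the "Theoretical memory footprints: weight and optimizer=2403.18 MB" line above: it is consistent with roughly 18 bytes per parameter for the most loaded shard (0.1400 billion parameters). The byte breakdown used below (fp16 weight, fp32 master weight, two fp32 Adam moments, fp32 main gradient) is an assumption rather than something the run prints, but it reproduces the logged figure to within rounding.

```python
# Assumed mixed-precision Adam breakdown: 2 (fp16 weight) + 4 (fp32 master)
# + 4 (fp32 exp_avg) + 4 (fp32 exp_avg_sq) + 4 (fp32 main grad) = 18 B/param.
params_most_loaded_shard = 0.1400e9     # "most loaded shard" from the log
bytes_per_param = 2 + 4 + 4 + 4 + 4
footprint_mb = params_most_loaded_shard * bytes_per_param / 2**20
print(f"~{footprint_mb:.2f} MB")        # ~2403.26 MB vs. 2403.18 MB logged
```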
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 22:10:59] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 115.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
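The loss scale in these iteration summaries halves each time (4294967296.0 at iteration 1, 2147483648.0 at iteration 2, and so on) while every iteration is reported as skipped. That is the standard dynamic loss-scaling backoff: an fp16 gradient overflow causes the step to be skipped and the scale to be cut in half. A minimal sketch of that policy follows; the initial scale, growth interval, and factors are assumed defaults, not values read from this run.

```python
# Minimal dynamic loss-scaling sketch: halve on overflow (step skipped),
# grow after a run of clean steps. All constants here are assumptions.
class DynamicLossScaler:
    def __init__(self, init_scale=2.0**32, backoff_factor=0.5,
                 growth_factor=2.0, growth_interval=1000, min_scale=1.0):
        self.scale = init_scale
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self.growth_interval = growth_interval
        self.min_scale = min_scale
        self._clean_steps = 0

    def update(self, found_overflow):
        """Return True if the optimizer step should be applied."""
        if found_overflow:
            # matches the 'number of skipped iterations: 1' lines above
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            self._clean_steps = 0
            return False
        self._clean_steps += 1
        if self._clean_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return True

scaler = DynamicLossScaler()
for _ in range(3):                 # three consecutive overflow iterations
    scaler.update(found_overflow=True)
    print(scaler.scale)            # 2147483648.0, 1073741824.0, 536870912.0
```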
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 22:10:59] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 88.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
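Iteration 1 took 18740.0 ms while iterations 2-4 complete in roughly 83-115 ms; the first iteration typically absorbs one-time startup cost (kernel selection, allocator growth, trace setup) rather than reflecting steady-state speed. To pull these timings out of a saved copy of the log, a throwaway parser along the following lines is enough; the file name and regex here are assumptions, not part of the training code.

```python
# Throwaway parser for the "elapsed time per iteration (ms)" values printed
# in the iteration summary lines above. File name and regex are assumptions.
import re

PAT = re.compile(r"iteration\s+(\d+)/\s*\d+.*elapsed time per iteration \(ms\): ([\d.]+)")

def iteration_times(path="training.out.log"):
    times = {}
    with open(path) as f:
        for line in f:
            m = PAT.search(line)
            if m:
                times[int(m.group(1))] = float(m.group(2))
    return times

if __name__ == "__main__":
    for it, ms in sorted(iteration_times().items()):
        print(f"iteration {it}: {ms} ms")
```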
position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 22:10:59] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 83.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: batch tensor:attention_mask torch.Size([8, 1, 8192, 8192]) +tokensbatch tensor: position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 
1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192])batch tensor: +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: loss_masktokens torch.Size([8, 8192]) +batch tensor: torch.Size([8, 8192])attention_mask +torch.Size([8, 1, 8192, 8192])batch tensor: + labelsbatch tensor: position_idstorch.Size([8, 8192]) +torch.Size([8, 8192])batch tensor: +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) + loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192])batch tensor after cp: + batch tensor:tokens loss_mask torch.Size([8, 1024])torch.Size([8, 8192]) + +batch tensor after cp:batch tensor: labelsattention_mask torch.Size([8, 1024])torch.Size([8, 1, 8192, 8192]) + +batch tensor after cp:batch tensor: loss_maskposition_ids torch.Size([8, 1024])torch.Size([8, 8192]) + +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor:batch tensor after cp: tokens tokenstorch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024])torch.Size([8, 8192]) + +batch tensor after cp: loss_mask batch tensor:torch.Size([8, 1024]) +labels batch tensor after cp:torch.Size([8, 8192]) +attention_mask batch tensor:torch.Size([8, 1, 1024, 8192]) +loss_mask batch tensor after cp: torch.Size([8, 8192])position_ids + torch.Size([8, 1024])batch tensor: + attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) 
+batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: batch tensor:tokens tokens torch.Size([8, 8192]) +torch.Size([8, 8192])batch tensor: +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labelsbatch tensor after cp: torch.Size([8, 8192])tokens + batch tensor:torch.Size([8, 1024]) +loss_mask batch tensor after cp:torch.Size([8, 8192]) +labels torch.Size([8, 8192])batch tensor: + labelsbatch tensor: loss_masktorch.Size([8, 8192]) +torch.Size([8, 8192])batch tensor: + loss_maskbatch tensor: torch.Size([8, 8192])attention_mask + batch tensor:torch.Size([8, 1, 8192, 8192]) +attention_mask batch tensor:torch.Size([8, 1, 8192, 8192]) +position_ids batch tensor:torch.Size([8, 8192]) +position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +labels batch tensor:torch.Size([8, 1024]) +attention_mask batch tensor after cp: torch.Size([8, 1, 8192, 8192])loss_mask + torch.Size([8, 1024])batch tensor: + position_idsbatch tensor after cp: torch.Size([8, 8192])attention_mask + torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens batch tensor after cp: tokenstorch.Size([8, 8192]) +torch.Size([8, 1024]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor:batch tensor after cp: labelslabels torch.Size([8, 8192])torch.Size([8, 1024]) + +batch tensor after cp: tokensbatch tensor after cp: tokenstorch.Size([8, 1024]) +torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: batch tensor after cp:attention_mask torch.Size([8, 1, 1024, 8192])tokens + batch tensor after cp: torch.Size([8, 1024])position_ids + batch tensor after cp:torch.Size([8, 1024]) +batch tensor:batch tensor after cp: loss_maskloss_mask torch.Size([8, 8192])torch.Size([8, 1024]) + +batch tensor after cp:batch tensor after cp: labelslabels torch.Size([8, 1024])torch.Size([8, 1024]) + +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor:batch tensor after cp: attention_maskattention_mask torch.Size([8, 1, 8192, 8192]) +torch.Size([8, 1, 1024, 8192]) +batch tensor:batch tensor after cp: 
position_idsposition_ids torch.Size([8, 8192])torch.Size([8, 1024]) +batch tensor after cp:batch tensor after cp: loss_maskloss_mask torch.Size([8, 1024])torch.Size([8, 1024]) + +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) + +batch tensor after cp:batch tensor after cp: attention_maskattention_mask torch.Size([8, 1, 1024, 8192])torch.Size([8, 1, 1024, 8192]) + +batch tensor after cp:batch tensor after cp: position_idsposition_ids torch.Size([8, 1024])torch.Size([8, 1024]) + +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: tokens batch tensor:torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp:batch tensor after cp: labelstokens torch.Size([8, 1024])torch.Size([8, 1024]) + +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) + batch tensor:tokens labels torch.Size([8, 8192]) +batch tensor: torch.Size([8, 8192])loss_mask +torch.Size([8, 8192]) +batch tensor after cp:batch tensor after cp: loss_masklabels torch.Size([8, 1024])torch.Size([8, 1024])batch tensor: + +batch tensor after cp:batch tensor after cp: loss_maskattention_mask torch.Size([8, 1024]) torch.Size([8, 1, 1024, 8192]) + +batch tensor:batch tensor after cp: position_idstokens torch.Size([8, 8192]) +torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: batch tensor:labels attention_mask torch.Size([8, 8192]) +torch.Size([8, 1, 8192, 8192])batch tensor: +batch tensor after cp:tokensbatch tensor after cp: position_ids attention_masktorch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) + loss_maskbatch tensor: torch.Size([8, 8192])position_ids + torch.Size([8, 8192])batch tensor: + attention_mask torch.Size([8, 1, 
8192, 8192]) +torch.Size([8, 1, 1024, 8192])torch.Size([8, 8192]) + +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: batch tensor:position_ids labelstorch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024])batch tensor after cp: + batch tensor after cp:tokens labels torch.Size([8, 1024])torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) + +batch tensor after cp: batch tensor after cp:labels loss_masktorch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +torch.Size([8, 1024])batch tensor after cp: + loss_maskbatch tensor after cp: torch.Size([8, 1024])attention_mask + batch tensor after cp:torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +attention_mask batch tensor after cp: torch.Size([8, 1, 1024, 8192])position_ids + batch tensor after cp:torch.Size([8, 1024]) +position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens 
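The shape pairs above show the effect of the context-parallel split: each rank keeps seq_len / CP_SIZE = 8192 / 8 = 1024 query positions of every per-sequence tensor, while the attention mask retains the full 8192-token key dimension. The sketch below is only an illustration that reproduces these "after cp" shapes with a plain contiguous chunking of the sequence dimension; the actual Megatron slicing that produced these prints may chunk the sequence differently (for example to balance causal-attention work across ranks).

import torch

def slice_batch_for_cp_rank(batch, cp_size, cp_rank):
    """Keep 1/cp_size of the sequence (query) dimension for this rank (illustrative only)."""
    seq_len = batch["tokens"].size(1)
    chunk = seq_len // cp_size
    sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
    out = {k: v[:, sl] for k, v in batch.items() if k != "attention_mask"}
    # attention_mask is [b, 1, seq_q, seq_k]: only the query rows are sliced,
    # the key dimension stays at the full sequence length.
    out["attention_mask"] = batch["attention_mask"][:, :, sl, :]
    return out

b, s, cp = 8, 8192, 8
batch = {
    "tokens": torch.zeros(b, s, dtype=torch.long),
    "labels": torch.zeros(b, s, dtype=torch.long),
    "loss_mask": torch.ones(b, s),
    "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
    "position_ids": torch.arange(s).unsqueeze(0).expand(b, s),
}
local = slice_batch_for_cp_rank(batch, cp_size=cp, cp_rank=0)
print(local["tokens"].shape)          # torch.Size([8, 1024])
print(local["attention_mask"].shape)  # torch.Size([8, 1, 1024, 8192])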
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 22:10:59] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 65.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
+Start exporting trace 5
+Done exporting trace 5
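Of the tensors printed each iteration, the materialized attention mask dominates. The quick arithmetic below assumes one byte per element (a bool mask; the log does not show the dtype) and compares the full mask with the per-rank context-parallel slice.

# Rough per-rank cost of the attention-mask shapes printed above,
# assuming 1 byte per element (bool); the actual dtype is not shown in the log.
full_mask_elems = 8 * 1 * 8192 * 8192   # [8, 1, 8192, 8192]
cp_mask_elems   = 8 * 1 * 1024 * 8192   # [8, 1, 1024, 8192] after the cp split
print(full_mask_elems, cp_mask_elems)                  # 536870912 67108864 elements
print(full_mask_elems / 2**20, cp_mask_elems / 2**20)  # ~512 MiB vs ~64 MiB at 1 B/elem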
+ [2025-06-21 22:10:59] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 73.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 22:10:59] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 66.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
+ batch tensor after cp:torch.Size([8, 1, 1024, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +position_ids batch tensor after cp:torch.Size([8, 1024]) +position_ids torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 22:10:59] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 66.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) 
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192])batch tensor: +batch tensor: loss_mask tokenstorch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])torch.Size([8, 8192]) + +batch tensor: position_ids batch tensor:torch.Size([8, 8192]) +labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch 
tensor after cp: position_idsbatch tensor after cp: torch.Size([8, 1024])tokens +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) + torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp:batch tensor after cp: labelstokens torch.Size([8, 1024]) +torch.Size([8, 1024])batch tensor after cp: +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: loss_mask batch tensor:torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) + loss_maskbatch tensor after cp: torch.Size([8, 1024]) +labelsbatch tensor after cp: torch.Size([8, 1024])attention_mask + batch tensor after cp:torch.Size([8, 1, 1024, 8192]) +loss_mask batch tensor after cp:torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: tokensattention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192])torch.Size([8, 8192]) + +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +position_ids batch tensor after cp:torch.Size([8, 1024]) attention_mask + torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens batch tensor after cp: tokenstorch.Size([8, 8192]) +torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor:batch tensor after cp: labelslabels torch.Size([8, 8192])torch.Size([8, 1024]) + +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor 
after cp: labelsbatch tensor: torch.Size([8, 1024])batch tensor: +batch tensor:batch tensor after cp: loss_maskloss_mask torch.Size([8, 8192])torch.Size([8, 1024]) + +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) + batch tensor after cp:tokens torch.Size([8, 8192])tokens +batch tensor:batch tensor after cp: attention_maskattention_mask torch.Size([8, 1, 8192, 8192])torch.Size([8, 1, 1024, 8192]) + +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +loss_mask batch tensor:torch.Size([8, 8192]) +torch.Size([8, 1024]) +batch tensor:batch tensor after cp: position_idsposition_ids torch.Size([8, 8192])torch.Size([8, 1024]) + +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +labelsbatch tensor:batch tensor after cp: torch.Size([8, 8192])labelsattention_mask + batch tensor:torch.Size([8, 8192])torch.Size([8, 1, 1024, 8192]) + +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +loss_maskbatch tensor:batch tensor after cp: torch.Size([8, 8192])loss_mask +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +position_ids batch tensor:torch.Size([8, 8192])torch.Size([8, 1024]) + +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: batch tensor after cp:labels torch.Size([8, 1024])tokens + batch tensor after cp: torch.Size([8, 1024])loss_mask +batch tensor: tokens torch.Size([8, 8192])batch tensor: +batch tensor: tokens torch.Size([8, 8192]) +attention_maskbatch tensor: torch.Size([8, 1, 8192, 8192])attention_mask + batch tensor after cp:torch.Size([8, 1024]) +labels batch tensor after cp:torch.Size([8, 1024]) +batch tensor: tokenslabels torch.Size([8, 8192]) +batch tensor: torch.Size([8, 8192])loss_mask +torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) + batch tensor:batch tensor:torch.Size([8, 1, 8192, 8192]) +attention_maskbatch tensor after cp: torch.Size([8, 1, 1024, 8192])loss_mask + torch.Size([8, 1024])batch tensor after cp: +batch tensor:batch tensor: labelsattention_mask torch.Size([8, 8192]) +torch.Size([8, 1, 8192, 8192])batch tensor: +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) + position_idsbatch tensor: tokens torch.Size([8, 8192]) position_ids + torch.Size([8, 8192])torch.Size([8, 8192]) + + position_idsbatch tensor after cp: attention_mask torch.Size([8, 1024]) +torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) + loss_maskbatch tensor: 
position_idstorch.Size([8, 8192]) +torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: attention_mask batch tensor after cp:torch.Size([8, 1, 8192, 8192]) +tokens batch tensor: torch.Size([8, 1024])position_ids + torch.Size([8, 8192])batch tensor after cp: +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) + labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp:batch tensor after cp: labels tokenstorch.Size([8, 1024]) +batch tensor after cp:torch.Size([8, 1024]) +loss_mask batch tensor after cp:torch.Size([8, 1024]) +labels batch tensor after cp:torch.Size([8, 1024]) +attention_mask batch tensor after cp:torch.Size([8, 1, 1024, 8192]) loss_mask + torch.Size([8, 1024])batch tensor after cp: + batch tensor after cp:position_ids attention_mask torch.Size([8, 1024])torch.Size([8, 1, 1024, 8192]) + +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: position_ids 
torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 22:10:59] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 67.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 8192, 8192]) +batch tensor: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 1024]) +batch tensor after cp: labels torch.Size([8, 1024]) +batch tensor: tokens torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 1024]) +batch tensor: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 8192]) +batch tensor 
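The shapes logged above show the effect of the context-parallel split: each CP rank keeps 8192 / 8 = 1024 sequence positions, so tokens, labels, loss_mask and position_ids go from [8, 8192] to [8, 1024], while the attention mask goes from [8, 1, 8192, 8192] to [8, 1, 1024, 8192] because only its query dimension is sliced. The shrinking loss scale across the iteration lines (33554432.0 -> 16777216.0 -> 8388608.0, each with one skipped iteration) is dynamic fp16 loss scaling backing off after overflow. A minimal, hypothetical sketch of such a per-rank slice follows; it assumes a simple contiguous split and is not the exact chunk-interleaved split Megatron-LM uses to balance causal-attention work across CP ranks.

import torch

def split_batch_for_cp_rank(batch, cp_size, cp_rank, seq_dim=1):
    """Keep only this CP rank's slice of the sequence dimension.

    Simplified illustration: a contiguous 1/cp_size slice per rank.
    The real Megatron-LM split interleaves 2*cp_size chunks instead.
    """
    out = {}
    for name, t in batch.items():
        # attention_mask is [b, 1, s, s]; only its query dim (dim 2) is sliced.
        dim = 2 if name == "attention_mask" else seq_dim
        chunk = t.size(dim) // cp_size
        out[name] = t.narrow(dim, cp_rank * chunk, chunk)
    return out

b, s, cp = 8, 8192, 8
batch = {
    "tokens": torch.zeros(b, s, dtype=torch.long),
    "labels": torch.zeros(b, s, dtype=torch.long),
    "loss_mask": torch.ones(b, s),
    "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
    "position_ids": torch.arange(s).repeat(b, 1),
}
local = split_batch_for_cp_rank(batch, cp_size=cp, cp_rank=0)
print(local["tokens"].shape)          # torch.Size([8, 1024])
print(local["attention_mask"].shape)  # torch.Size([8, 1, 1024, 8192])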
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.027870893478393555 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 21, takes 0.027909517288208008 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.027666330337524414 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.027667999267578125 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 12, takes 0.027701616287231445 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 19, takes 0.02792954444885254 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 26, takes 0.02785325050354004 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 24, takes 0.027776718139648438 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 16, takes 0.02798604965209961 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank:
7, takes 0.028397560119628906 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.028036832809448242 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.028441905975341797 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.02855229377746582 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.02845931053161621 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.028378725051879883 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.028470516204833984 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.028519153594970703 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.028586626052856445 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.028504133224487305 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.02868199348449707 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0286409854888916 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.028738021850585938 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.02888011932373047 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.028597354888916016 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.028883695602416992 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.028697729110717773 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.029085874557495117 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.029095888137817383 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.02889227867126465 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.02945685386657715 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.03655123710632324 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.2626807689666748 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), 
(np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), 
(np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: 
[(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92346368), 2), (np.int64(92346368), 3), (np.int64(92343296), 4), (np.int64(92343296), 5), (np.int64(92340224), 6), (np.int64(92340224), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1772217750549316 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2024016380310059 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2025315761566162 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2162127494812012 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.202317476272583 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2014853954315186 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1774466037750244 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2225492000579834 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2220752239227295 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2018754482269287 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1857964992523193 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1958940029144287 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1865172386169434 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.187173843383789 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.222815752029419 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.222672462463379 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1859872341156006 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.177595615386963 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2016899585723877 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1865601539611816 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1879940032958984 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2233633995056152 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1860811710357666 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.177917718887329 
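The distribute_shards_to_ranks lines above show every rank in an 8-way save group agreeing on the same byte-count-to-rank assignment (about 208 MB to rank 0, 104 MB to rank 1, and 84-92 MB to each remaining rank), which is what the fully-parallel save strategy uses to decide which rank persists which shard. As a rough illustration only (not Megatron's actual implementation; the function name and toy shard sizes below are hypothetical), a greedy largest-first assignment to the least-loaded rank yields output in the same (total_bytes, rank) shape as the debug line:

    import heapq

    def distribute_shards_greedy(shard_sizes, num_ranks):
        """Assign each shard (largest first) to the currently least-loaded rank.
        Returns (per_rank_bytes, shard_to_rank); per_rank_bytes mirrors the
        (bytes, rank) tuples printed in the debug lines above."""
        heap = [(0, rank) for rank in range(num_ranks)]        # (bytes assigned, rank)
        heapq.heapify(heap)
        shard_to_rank = {}
        for key, size in sorted(shard_sizes.items(), key=lambda kv: kv[1], reverse=True):
            load, rank = heapq.heappop(heap)
            shard_to_rank[key] = rank
            heapq.heappush(heap, (load + size, rank))
        per_rank_bytes = sorted(heap, key=lambda t: t[1])      # order by rank id
        return per_rank_bytes, shard_to_rank

    # Hypothetical sizes in bytes; the real ones come from the sharded state dict.
    sizes = {"word_embeddings": 207_618_048, "layer_0": 103_809_024, "layer_1": 92_274_688}
    print(distribute_shards_greedy(sizes, num_ranks=8))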
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2224359512329102 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2225983142852783 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.205193042755127 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1779322624206543 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2226333618164062 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.178072214126587 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.018216609954833984 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying 
reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
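Each rank then starts its state dict save: a local save plan is built per rank, the plans are merged into a global plan on the coordinator, and the strategy checks whether previously cached global metadata can be reused; "no loaded plans passed" indicates the verification failed (here nothing useful was cached), so the global plan is rebuilt. A minimal sketch of that reuse check, under the simplifying assumption that a plan is essentially a key-to-shape mapping (all names below are hypothetical, not the planner's real API):

    from dataclasses import dataclass
    from typing import Dict, List, Optional, Tuple

    LocalPlan = Dict[str, Tuple[int, ...]]          # tensor key -> global shape

    @dataclass
    class GlobalMetadata:
        # Hypothetical stand-in for the coordinator's cached global metadata.
        entries: Dict[str, Tuple[int, ...]]

    def build_global_plan(local_plans: List[LocalPlan],
                          cached: Optional[GlobalMetadata]) -> Tuple[GlobalMetadata, bool]:
        """Merge local plans; reuse the cached metadata only if it describes
        exactly the same tensors, otherwise rebuild (the 'no loaded plans
        passed' case in the log)."""
        merged: Dict[str, Tuple[int, ...]] = {}
        for plan in local_plans:
            merged.update(plan)
        if cached is not None and cached.entries == merged:
            return cached, True
        return GlobalMetadata(merged), False

    plans = [{"decoder.weight": (4096, 1024)}, {"decoder.bias": (4096,)}]
    metadata, reused = build_global_plan(plans, cached=None)
    print(reused)   # False -> plans are rebuilt, as in the log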
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.183115005493164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.012907028198242188 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.012922286987304688 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.016757488250732422 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1632042 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.016071796417236328 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.016729116439819336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.163214 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1632147 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.01286625862121582 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1632485 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1632476 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.936622619628906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.507469177246094e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.459785461425781e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.016574621200561523 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.015942096710205078 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.01649022102355957 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.015959739685058594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.016663551330566406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.01662278175354004 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.016683101654052734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 
0.016705989837646484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1632733 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.014834165573120117 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.016202211380004883 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.015931129455566406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.015976428985595703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1638587 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.015320301055908203 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.016478776931762695 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1647887 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.016204357147216797 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.016004085540771484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.626678466796875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.580352783203125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1646402 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1646411 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1646426 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1646457 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1638653 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1647978 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.793571472167969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.164659 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1638863 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1648169 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.016359329223632812 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.00443720817565918 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.015568971633911133 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.016169309616088867 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1638894 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1638875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.016685009002685547 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.01587653160095215 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1637797 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.016043424606323242 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.016170501708984375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.01543736457824707 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1648264 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00014448165893554688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.164702 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1646988 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.508827209472656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.556510925292969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.604194641113281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.164705 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.389617919921875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.130073547363281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1639206 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1648214 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1648486 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.817413330078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.724761962890625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.772445678710938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.559226989746094e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.580352783203125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.163927 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.1648347 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.164854 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.0558319091796875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.508827209472656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.16393 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.8650970458984375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.127357482910156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.723403930664062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.270408630371094e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.390975952148438e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.270408630371094e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.914138793945312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.103515625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.508827209472656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.678436279296875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.557868957519531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.016081809997558594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543862.167712 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 4.982948303222656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.049709320068359375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2149477 rank: 10, write(async) time: 0.0501253604888916 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05007123947143555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2152772 rank: 11, write(async) time: 0.050485849380493164 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.050336599349975586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2146971 rank: 20, write(async) time: 0.05080890655517578 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05118680000305176 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2155454 rank: 22, write(async) time: 0.051613569259643555 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05143165588378906 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2157013 rank: 23, write(async) time: 0.05184030532836914 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05154299736022949 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05158233642578125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2167501 rank: 13, write(async) time: 0.051950693130493164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2159088 rank: 21, write(async) time: 0.05198979377746582 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05221819877624512 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2165594 rank: 17, write(async) time: 0.05266833305358887 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05283403396606445 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2171159 rank: 19, write(async) time: 0.05324816703796387 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.054111480712890625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.219351 rank: 14, write(async) time: 0.05452251434326172 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.055315494537353516 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2205532 rank: 15, write(async) time: 0.05573463439941406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05535435676574707 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.219765 rank: 16, write(async) time: 0.05587625503540039 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.056180715560913086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2198076 rank: 3, write(async) time: 0.05659008026123047 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05654549598693848 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.220183 rank: 1, write(async) time: 0.0569758415222168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.057036399841308594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05713605880737305 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2223346 rank: 9, write(async) time: 0.05748629570007324 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.22242 rank: 12, write(async) time: 0.05760359764099121 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05754685401916504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05771780014038086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2226617 rank: 28, write(async) time: 0.05800318717956543 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.221419 rank: 7, write(async) time: 0.0581669807434082 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05776190757751465 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2229145 rank: 25, write(async) time: 0.05821037292480469 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05832648277282715 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2219355 rank: 4, write(async) time: 0.05872297286987305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.058768510818481445 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.224052 rank: 8, write(async) time: 0.05919837951660156 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05906057357788086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2241442 rank: 31, write(async) time: 0.05949878692626953 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06125974655151367 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06135869026184082 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.226364 rank: 27, write(async) time: 0.06171774864196777 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2264802 rank: 29, write(async) time: 0.06177234649658203 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06242680549621582 
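The filesystem_async lines trace the hand-off to the asynchronous writer: each rank splits its write items into thread_count=2 buckets (bucket_prep, tens of microseconds), copies GPU tensors to host memory, and pushes the host copies to the writer ("D2H and push", roughly 0.05-0.25 s here), so the reported write(async) time covers only this staging, not the actual disk I/O. A minimal sketch of such staging, assuming plain PyTorch and hypothetical names:

    import torch

    def stage_buckets_for_async_write(state_dict, thread_count=2):
        """Rough analogue of 'bucket_prep' + 'D2H and push': split items into
        thread_count buckets and replace CUDA tensors with pinned host copies
        so a background writer never has to touch GPU memory."""
        buckets = [[] for _ in range(thread_count)]
        for i, (key, tensor) in enumerate(sorted(state_dict.items())):
            if tensor.is_cuda:
                host = torch.empty(tensor.shape, dtype=tensor.dtype,
                                   device="cpu", pin_memory=True)
                host.copy_(tensor, non_blocking=True)
                tensor = host
            buckets[i % thread_count].append((key, tensor))
        if torch.cuda.is_available():
            torch.cuda.synchronize()   # ensure every D2H copy has completed
        return buckets

    demo = {"weight": torch.zeros(4, 4), "bias": torch.zeros(4)}
    print([len(b) for b in stage_buckets_for_async_write(demo)])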
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2275846 rank: 24, write(async) time: 0.06288456916809082 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06387138366699219 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.228237 rank: 18, write(async) time: 0.06430768966674805 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06259036064147949 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.230741 rank: 0, write(async) time: 0.06302905082702637 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.07262587547302246 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2369187 rank: 5, write(async) time: 0.07314801216125488 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 2.0503997802734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.7881393432617188e-05 to finish D2H 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.10798811912536621 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.2730708 rank: 30, write(async) time: 0.1084299087524414 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.02862095832824707 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.029192686080932617 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.026767730712890625 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.028331518173217773 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.0273590087890625 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.02991175651550293 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.02869248390197754 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.030248641967773438 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.02901625633239746 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 3.7670135498046875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.029465913772583008 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.030627727508544922 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.030909061431884766 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.032483577728271484 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.03309774398803711 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.032834768295288086 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.03457164764404297 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.03425192832946777 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.03312253952026367 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.03468036651611328 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.03484797477722168 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.03964042663574219 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.03475594520568848 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.03332686424255371 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.033951520919799805 to schedule async ckpt 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.4080276489257812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 2.0265579223632812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.15984320640563965 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.3249097 rank: 26, write(async) time: 0.16026663780212402 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.0435633659362793 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.03074789047241211 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.0317690372467041 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03240084648132324 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21934080, before: 1726971904, after: 1748905984 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.04314994812011719 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.03551077842712402 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30289920, before: 1732165632, after: 1762455552 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1707614208, after: 1707749376 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.24479913711547852 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.408495 rank: 6, write(async) time: 0.24524950981140137 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.24867796897888184 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543862.4124117 rank: 2, write(async) time: 0.24913883209228516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22048768, before: 1724121088, after: 1746169856 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22151168, before: 1740525568, after: 1762676736 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22147072, before: 1748897792, after: 1771044864 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
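After scheduling, each rank's async caller forks a writer process ("from forking" in the join messages further down), which runs one worker per bucket; the "0 started" / "1 started" lines come from those workers, and "FileSystemWriterAsync: collecting worker results..." is the writer gathering what they produced. A self-contained sketch of that fork-then-threads pattern, with hypothetical file names and a toy payload in place of real tensor data (fork start method assumed, i.e. Linux):

    import multiprocessing as mp
    import os
    import pickle
    import tempfile
    from concurrent.futures import ThreadPoolExecutor

    def write_bucket(args):
        worker_id, bucket, directory = args
        print(f"{worker_id} started")                          # cf. '0 started' / '1 started'
        path = os.path.join(directory, f"__{worker_id}_0.distcp")   # hypothetical name
        with open(path, "wb") as f:
            pickle.dump(bucket, f)
        return worker_id, os.path.getsize(path)

    def writer_process(buckets, directory, result_queue):
        # Forked child: one thread per bucket does the actual (sync, parallel) write.
        with ThreadPoolExecutor(max_workers=len(buckets)) as pool:
            futures = [pool.submit(write_bucket, (i, b, directory))
                       for i, b in enumerate(buckets)]
            print("collecting worker results...")
            results = [f.result() for f in futures]
        print("collected worker results successfully")
        result_queue.put(results)

    if __name__ == "__main__":
        buckets = [[("a", list(range(10)))], [("b", list(range(20)))]]   # toy payload
        ctx = mp.get_context("fork")
        queue = ctx.Queue()
        with tempfile.TemporaryDirectory() as tmp:
            proc = ctx.Process(target=writer_process, args=(buckets, tmp, queue))
            proc.start()
            results = queue.get()
            proc.join()                    # cf. 'joining self.process'
        print(results)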
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51396608, before: 1745178624, after: 1796575232 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30281728, before: 1723723776, after: 1754005504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30539776, before: 1727209472, after: 1757749248 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21995520, before: 1725460480, after: 1747456000 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21946368, before: 1726816256, after: 1748762624 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51470336, before: 1716547584, after: 1768017920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51499008, before: 1726578688, after: 1778077696 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72245248, before: 1724071936, after: 1796317184 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51396608, before: 1707581440, after: 1758978048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 4096, before: 2047520768, after: 2047524864 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72351744, before: 1740488704, after: 1812840448 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30601216, before: 1805164544, after: 1835765760 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30015488, before: 1736388608, after: 1766404096 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72347648, before: 1748897792, after: 1821245440 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72204288, before: 1726906368, after: 1799110656 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 131072, before: 1736183808, after: 1736314880 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51408896, before: 1720303616, after: 1771712512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30343168, before: 1727709184, after: 1758052352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30367744, before: 1736802304, after: 1767170048 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.037406206130981445 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72196096, before: 1725460480, after: 1797656576 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30507008, before: 1773670400, after: 1804177408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.502028, rank: 19, write(sync,parallel): 0.21661114692687988 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.193450927734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72142848, before: 1726816256, after: 1798959104 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72450048, before: 1727209472, after: 1799659520 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5094185, rank: 22, write(sync,parallel): 0.22568798065185547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47214592, before: 1720303616, after: 1767518208 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72261632, before: 1732165632, after: 1804427264 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47185920, before: 1745195008, after: 1792380928 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30261248, before: 1736376320, after: 1766637568 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72089600, before: 1736388608, after: 1808478208 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30355456, before: 1742102528, after: 1772457984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47120384, before: 1707581440, after: 1754701824 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 55439360, before: 1763463168, after: 1818902528 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46977024, before: 1715302400, after: 1762279424 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47247360, before: 1763463168, after: 1810710528 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72327168, before: 1723723776, after: 1796050944 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51339264, before: 1715326976, after: 1766666240 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72376320, before: 1727709184, after: 1800085504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5309007, rank: 17, write(sync,parallel): 0.24512863159179688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.531418, rank: 23, write(sync,parallel): 0.2463991641998291 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72429568, before: 1805164544, after: 1877594112 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51412992, before: 1726578688, after: 1777991680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72208384, before: 1736376320, after: 1808584704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47276032, before: 1716547584, after: 1763823616 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.0351107120513916 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5413258, rank: 21, write(sync,parallel): 0.2575099468231201 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s 
from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.545238, rank: 18, write(sync,parallel): 0.24564146995544434 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51232768, before: 1732173824, after: 1783406592 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5475109, rank: 20, write(sync,parallel): 0.26131749153137207 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.549076, rank: 10, write(sync,parallel): 0.26387619972229004 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47038464, before: 1732173824, after: 1779212288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72196096, before: 1742102528, after: 1814298624 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72310784, before: 1736802304, after: 1809113088 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72314880, before: 1773670400, after: 1845985280 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5558548, rank: 29, write(sync,parallel): 0.2547147274017334 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.555982, rank: 25, write(sync,parallel): 0.26513147354125977 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5612793, rank: 31, write(sync,parallel): 0.26437854766845703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5620127, rank: 11, write(sync,parallel): 0.27297306060791016 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5627794, rank: 13, write(sync,parallel): 0.2751612663269043 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.564954, rank: 14, write(sync,parallel): 0.27228546142578125 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5710325, rank: 30, write(sync,parallel): 0.22948813438415527 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.573085, rank: 9, write(sync,parallel): 0.27316975593566895 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5736406, rank: 27, write(sync,parallel): 0.2723660469055176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5737698, rank: 24, write(sync,parallel): 0.26906251907348633 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.575888, rank: 16, write(sync,parallel): 0.2716209888458252 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.576961, rank: 28, write(sync,parallel): 0.2768990993499756 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async 
process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5903292, rank: 12, write(sync,parallel): 0.2875967025756836 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5919325, rank: 15, write(sync,parallel): 0.2973814010620117 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5917146, rank: 26, write(sync,parallel): 0.19258689880371094 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.5940244, rank: 8, write(sync,parallel): 0.29097986221313477 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.27s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.37s from forking 
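The `TemporalAsyncCaller ... from forking` and `joining self.process` messages describe a fork-then-join lifecycle: each rank forks a background process to do the file writes, keeps running, and joins that process when the save is finalized. Below is only a minimal sketch of that pattern with `multiprocessing`; the helper names `schedule_async_save` and `finalize_async_save` are invented for the illustration and are not Megatron's `TemporalAsyncCaller` API:

```python
import multiprocessing as mp
import time


def _write_shard(path: str, payload: bytes) -> None:
    # Stand-in for the checkpoint shard write performed by the forked worker.
    with open(path, "wb") as f:
        f.write(payload)


def schedule_async_save(path: str, payload: bytes) -> mp.Process:
    """Fork a writer so the training loop can continue while the write happens."""
    ctx = mp.get_context("fork")  # the log messages explicitly say "from forking"
    proc = ctx.Process(target=_write_shard, args=(path, payload))
    proc.start()
    return proc


def finalize_async_save(proc: mp.Process) -> None:
    """Block until the forked writer is done (the "joining self.process" step)."""
    start = time.time()
    proc.join()
    print(f"Async process join finished after {time.time() - start:.2f}s from forking")


if __name__ == "__main__":
    p = schedule_async_save("/tmp/shard_0.bin", b"\0" * (1 << 20))
    # ... training iterations would continue here ...
    finalize_async_save(p)
```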
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 303104, before: 1722220544, after: 1722523648 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108974080, before: 1715765248, after: 1824739328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108883968, before: 1714016256, after: 1822900224 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108920832, before: 1713242112, after: 1822162944 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.7889595, rank: 4, write(sync,parallel): 0.47855710983276367 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.8022106, rank: 5, write(sync,parallel): 0.46688032150268555 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.8103878, rank: 7, write(sync,parallel): 0.5015642642974854 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.56s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.56s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.58s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108957696, before: 1711529984, after: 1820487680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543862.9020298, rank: 6, write(sync,parallel): 0.40463709831237793 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.49s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212381696, before: 1736183808, after: 1948565504 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212238336, before: 1707614208, after: 1919852544 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543863.0781407, rank: 1, write(sync,parallel): 0.7260236740112305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543863.1003325, rank: 3, write(sync,parallel): 0.7493710517883301 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.80s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.82s from forking 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212246528, before: 1722220544, after: 1934467072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212008960, before: 2047520768, after: 2259529728 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543863.2426724, rank: 2, write(sync,parallel): 0.698829174041748 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543863.286197, rank: 0, write(sync,parallel): 0.8897860050201416 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.79s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.98s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.332996, 6, gather: 0.39057350158691406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3329992, 7, gather: 0.48116183280944824 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3330522, 2, gather: 0.0371854305267334 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3331277, 4, gather: 0.5027594566345215 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3333151, 3, gather: 0.19176125526428223 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.333389, 5, gather: 0.48398327827453613 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3334336, 1, gather: 0.2174057960510254 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3341658, 23, gather: 0.7681064605712891 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3351157, 10, gather: 0.7515478134155273 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3351448, 14, gather: 0.7384326457977295 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3351383, 11, gather: 0.7392773628234863 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3342073, 22, gather: 0.788388729095459 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.334256, 20, gather: 0.7486767768859863 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3342965, 21, gather: 0.7550573348999023 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.334514, 19, gather: 0.7914633750915527 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3352294, 13, gather: 0.7403316497802734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3345413, 16, gather: 0.7174413204193115 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3353944, 8, gather: 0.7042801380157471 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.335487, 26, gather: 0.7095804214477539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3354828, 25, gather: 0.7425761222839355 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3347325, 17, gather: 0.7683637142181396 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3354836, 15, gather: 0.7102792263031006 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3354375, 9, gather: 0.725844144821167 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3357332, 27, gather: 0.7262907028198242 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3357406, 28, gather: 0.7205531597137451 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.335804, 31, gather: 0.7350368499755859 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3358142, 24, gather: 0.72275710105896 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3358545, 12, gather: 0.7064964771270752 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.335951, 29, gather: 0.7402985095977783 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.334999, 18, gather: 0.7513713836669922 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3360739, 30, gather: 0.7191934585571289 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.3371012, 0, gather: 0.007096290588378906 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543863.35248, metadata_write: 0.01521444320678711 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0249s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0591s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2386s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2133s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5242s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4122s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5029s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5055s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7301s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7429s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7411s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7466s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7635s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7603s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7595s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7611s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7726s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7552s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7606s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7891s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7385s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7252s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7308s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7394s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7717s 
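In the `state_dict_saver` lines above, every rank logs a `gather` time while only rank 0 (whose gather is nearly instant) also logs a `metadata_write`, which suggests the per-rank write results are gathered to a coordinator that then writes the checkpoint metadata once. Below is a minimal sketch of that gather-to-rank-0 pattern using `torch.distributed.gather_object`; the `.metadata` file name mirrors the torch_dist checkpoint layout, and this is an illustration of the pattern, not Megatron's `state_dict_saver`:

```python
import os
import pickle

import torch.distributed as dist


def finish_save(local_write_results: dict, ckpt_dir: str) -> None:
    """Gather per-rank write results on rank 0, which then writes one metadata file."""
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    gathered = [None] * world_size if rank == 0 else None
    dist.gather_object(local_write_results, gathered, dst=0)  # the per-rank "gather" step

    if rank == 0:
        os.makedirs(ckpt_dir, exist_ok=True)
        # Only the coordinator performs the "metadata_write" step seen in the log.
        with open(os.path.join(ckpt_dir, ".metadata"), "wb") as f:
            pickle.dump(gathered, f)


if __name__ == "__main__":
    # Launch with e.g.: torchrun --nproc_per_node=2 gather_metadata_sketch.py
    dist.init_process_group("gloo")
    finish_save({"rank": dist.get_rank(), "files": []}, "/tmp/gpt-checkpoint-sketch")
    dist.destroy_process_group()
```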
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8125s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7699s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7896s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.8098s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7763s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7273s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.7467s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.006078004837036133 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.006087064743041992 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.006057024002075195 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.006069660186767578 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.006012678146362305 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.006011962890625 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.006026506423950195 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.00597691535949707 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.006018400192260742 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.006005048751831055 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.006176948547363281 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.0059926509857177734 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.005982637405395508 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.0059642791748046875 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.006060361862182617 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0060024261474609375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.006006717681884766 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.006016969680786133 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.00606989860534668 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.006033420562744141 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.0060367584228515625 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.0059528350830078125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.006136417388916016 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.006131410598754883 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.006100654602050781 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.005889415740966797 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.00599360466003418 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.006181955337524414 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.0062541961669921875 to finalize ckpt save 
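Taken together, the per-rank timings above cover three phases of one asynchronous save: the parallel shard write (`write(sync,parallel)`), the `gather` of write results, and `finalize`. A small helper for pulling the spread of each phase out of this log; the regexes only cover the exact message formats shown above, and the filename is this run's log:

```python
import re
from collections import defaultdict

# Phase timings as they appear in the DEBUG lines above, e.g.
#   "... rank: 19, write(sync,parallel): 0.2166..."
#   "... 1750543863.3341658, 23, gather: 0.7681..."
#   "... finalize took 0.7717s"
PATTERNS = {
    "write(sync,parallel)": re.compile(r"write\(sync,parallel\): ([0-9.]+)"),
    "gather": re.compile(r", \d+, gather: ([0-9.]+)"),
    "finalize": re.compile(r"finalize took ([0-9.]+)s"),
}


def phase_spread(log_path: str) -> None:
    """Print the min/max/mean of each checkpoint-save phase across all ranks."""
    samples = defaultdict(list)
    with open(log_path) as f:
        for line in f:
            for phase, pattern in PATTERNS.items():
                m = pattern.search(line)
                if m:
                    samples[phase].append(float(m.group(1)))
    for phase, values in samples.items():
        print(f"{phase:>22}: n={len(values)}  min={min(values):.3f}s  "
              f"max={max(values):.3f}s  mean={sum(values) / len(values):.3f}s")


if __name__ == "__main__":
    phase_spread("attnserver.run_attnserver.slurm.sh.343216.out.log")
```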
+DEBUG:megatron.training.checkpointing:rank: 22, takes 0.006315946578979492 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.006336212158203125 to finalize ckpt save
+DEBUG:megatron.training.checkpointing:rank: 21, takes 0.006344318389892578 to finalize ckpt save
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+[per-rank "batch tensor" / "batch tensor after cp" shape dumps were interleaved by concurrent writes to stdout; one representative copy follows]
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024, 8192])
+batch tensor after cp: position_ids torch.Size([8, 1024])
+Start exporting trace 10
+Done exporting trace 10
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
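The `batch tensor` / `batch tensor after cp` pairs above show what context parallelism does to each rank's batch: with CP_SIZE=8, every 8192-token sequence dimension shrinks to 1024 per rank, while the attention mask keeps all 8192 key positions ([8, 1, 8192, 8192] becomes [8, 1, 1024, 8192]). The sketch below reproduces only that shape change with a plain contiguous split; Megatron's own CP batch helper interleaves chunks for causal load balancing, so the real index pattern differs:

```python
import torch


def slice_batch_for_cp(batch: dict, cp_rank: int, cp_size: int) -> dict:
    """Keep this CP rank's contiguous share of the sequence dimension (illustration only).

    tokens/labels/loss_mask/position_ids are split on dim 1; the attention mask is split
    only on its query dim (dim 2), so every rank still sees all key positions.
    """
    out = {}
    for key, tensor in batch.items():
        seq_dim = 2 if key == "attention_mask" else 1
        chunk = tensor.shape[seq_dim] // cp_size
        out[key] = tensor.narrow(seq_dim, cp_rank * chunk, chunk)
    return out


if __name__ == "__main__":
    b, s, cp = 8, 8192, 8
    batch = {
        "tokens": torch.zeros(b, s, dtype=torch.long),
        "labels": torch.zeros(b, s, dtype=torch.long),
        "loss_mask": torch.ones(b, s),
        "attention_mask": torch.ones(b, 1, s, s, dtype=torch.bool),
        "position_ids": torch.arange(s).repeat(b, 1),
    }
    local = slice_batch_for_cp(batch, cp_rank=0, cp_size=cp)
    for key, value in local.items():
        # e.g. tokens -> (8, 1024), attention_mask -> (8, 1, 1024, 8192), matching the log.
        print(f"batch tensor after cp: {key} {tuple(value.shape)}")
```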
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+    evaluate .......................................: (2731.43, 2733.34)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.276667E+01 | lm loss PPL: 3.503448E+05 |
+----------------------------------------------------------------------------------------------------------------
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+[per-rank "batch tensor" / "batch tensor after cp" shape dumps were interleaved by concurrent writes to stdout; one representative copy follows]
+batch tensor: tokens torch.Size([8, 8192])
+batch tensor: labels torch.Size([8, 8192])
+batch tensor: loss_mask torch.Size([8, 8192])
+batch tensor: attention_mask torch.Size([8, 1, 8192, 8192])
+batch tensor: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 1024])
+batch tensor after cp: labels torch.Size([8, 1024])
+batch tensor after cp: loss_mask torch.Size([8, 1024])
+batch tensor after cp: attention_mask torch.Size([8, 1, 1024,
8192]) +batch tensor after cp: position_ids torch.Size([8, 1024]) +Start exporting trace 11 +Done exporting trace 11 +(min, max) time across ranks (ms): + evaluate .......................................: (34.35, 38.28) +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.276667E+01 | lm loss PPL: 3.503448E+05 | +---------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Running ctx_length=2048, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 2048 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 
1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... 
False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 2048 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ 
torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 
90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 2048 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ 
snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... 
None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 
32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 2048 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. 
None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ 
None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.050 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.587 seconds +time to initialize megatron (seconds): 9.660 +[after megatron is initialized] datetime: 2025-06-21 22:11:46 +building GPT model ... 
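The padded-vocab line above ("> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)") is consistent with rounding the tokenizer vocabulary up to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size (128 * 4 = 512 for this run). A minimal Python sketch of that arithmetic, written for illustration only and not taken from the Megatron source:

def pad_vocab_size(orig_vocab_size, make_vocab_size_divisible_by=128, tensor_model_parallel_size=4):
    # Pad so each tensor-parallel rank gets an equal, well-aligned slice of the
    # embedding table (assumed rule; values taken from the arguments dump above).
    multiple = make_vocab_size_divisible_by * tensor_model_parallel_size  # 512
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = pad_vocab_size(50257)
print(padded, padded - 50257)  # 50688 431 -- matches the logged dummy-token count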
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 148442112
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+ > number of parameters on (tensor,
pipeline) model parallel rank (3, 0): 148442112 +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 148442112 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 148442112 +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (148442112 elements, 148442112 padded size): + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.word_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, 
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (47.83, 48.28) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:11:46 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=2048, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005653 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33296 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002532 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33281 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time 
elapsed: 0.002646 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33343 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:11:46 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (665.58, 705.14) + train/valid/test-data-iterators-setup ..........: (18.62, 153.72) +training ... +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:11:46 +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) 
+batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch 
tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
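Note on the shapes above: with CP_SIZE=8, each context-parallel rank keeps 16384 / 8 = 2048 of the 16384 sequence positions for tokens, labels, loss_mask and position_ids, while the attention mask keeps the full key length, going from [8, 1, 16384, 16384] to [8, 1, 2048, 16384]. The sketch below only reproduces that per-rank shape arithmetic with a toy contiguous split; it is not the actual Megatron slicing code, which typically splits each sequence into two chunks per rank to balance causal attention.

    import torch

    def slice_for_cp(batch, cp_size, cp_rank):
        # Toy contiguous split along the sequence dimension (assumption for
        # illustration); only the resulting shapes match the log.
        seq_len = batch["tokens"].size(1)          # 16384 in this run
        chunk = seq_len // cp_size                 # 16384 // 8 = 2048
        sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
        out = {k: v[:, sl] for k, v in batch.items() if k != "attention_mask"}
        # Only the query dimension of the mask is sliced; keys keep full length.
        out["attention_mask"] = batch["attention_mask"][:, :, sl, :]
        return out

    # Small demo sizes; in the log: batch=8, seq_len=16384, cp_size=8 -> 2048 per rank.
    demo = {
        "tokens": torch.zeros(2, 64, dtype=torch.long),
        "labels": torch.zeros(2, 64, dtype=torch.long),
        "loss_mask": torch.ones(2, 64),
        "position_ids": torch.arange(64).repeat(2, 1),
        "attention_mask": torch.ones(2, 1, 64, 64, dtype=torch.bool),
    }
    print(slice_for_cp(demo, cp_size=8, cp_rank=0)["tokens"].shape)          # torch.Size([2, 8])
    print(slice_for_cp(demo, cp_size=8, cp_rank=0)["attention_mask"].shape)  # torch.Size([2, 1, 8, 64])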
+Start exporting trace 0
+Done exporting trace 0
+ [2025-06-21 22:12:00] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 13533.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7796.0 | max reserved: 7796.0
+[Rank 17] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7700.0 | max reserved: 7700.0
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7844.0 | max reserved: 7844.0
+[Rank 25] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7844.0 | max reserved: 7844.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7716.0 | max reserved: 7716.0
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7746.0 | max reserved: 7746.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7618.0 | max reserved: 7618.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7478.0 | max reserved: 7478.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7618.0 | max reserved: 7618.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7650.0 | max reserved: 7650.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7668.0 | max reserved: 7668.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7832.0 | max reserved: 7832.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7844.0 | max reserved: 7844.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7704.0 | max reserved: 7704.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7672.0 | max reserved: 7672.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7608.0 | max reserved: 7608.0
+[Rank 18] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7796.0 | max reserved: 7796.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7766.0 | max reserved: 7766.0
+[Rank 29] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7832.0 | max reserved: 7832.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7672.0 | max reserved: 7672.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7654.0 | max reserved: 7654.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7654.0 | max reserved: 7654.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7638.0 | max reserved: 7638.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7766.0 | max reserved: 7766.0
+[Rank 30] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7832.0 | max reserved: 7832.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7560.0 | max reserved: 7560.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7654.0 | max reserved: 7654.0
+[Rank 23] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7638.0 | max reserved: 7638.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7672.0 | max reserved: 7672.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7560.0 | max reserved: 7560.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7608.0 | max reserved: 7608.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 4156.59814453125 | max allocated: 6969.68798828125 | reserved: 7544.0 | max reserved: 7544.0
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
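The parameter and memory report above is self-consistent: 0.35B transformer-block parameters plus 0.21B embedding parameters gives the reported 0.56B total, and with TP_SIZE=4 (pipeline size 1) the most loaded shard holds about 0.56B / 4 = 0.14B parameters. The 2403.18 MB weight-and-optimizer figure is consistent with roughly 18 bytes per parameter for fp16 training with Adam; a quick check under that assumption (the byte breakdown below is an assumption for illustration, not taken from the log):

    # Quick consistency check of the report above (values as printed in the log).
    transformer_b, embedding_b = 0.35, 0.21
    total_b = round(transformer_b + embedding_b, 2)   # 0.56, as reported
    most_loaded_params = 0.1400e9                     # ~ total / TP_SIZE with TP_SIZE=4
    bytes_per_param = 2 + 4 + 4 + 4 + 4               # assumed: fp16 weight, fp32 grad, fp32 master, Adam m, Adam v
    footprint_mb = most_loaded_params * bytes_per_param / 2**20
    print(total_b, round(footprint_mb, 2))            # 0.56 2403.26; the log reports 2403.18 MB from the unrounded shard size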
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 22:12:00] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 205.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
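A side effect of create_attention_mask_in_dataloader=True that these shapes make visible: each rank first materializes the full [8, 1, 16384, 16384] boolean mask (about 2 GiB at one byte per element) before the context-parallel slice shrinks it to [8, 1, 2048, 16384] (about 256 MiB). Back-of-the-envelope check, assuming 1-byte bool storage:

    full   = 8 * 1 * 16384 * 16384   # elements in the pre-slice mask [8, 1, 16384, 16384]
    sliced = 8 * 1 * 2048 * 16384    # elements after keeping 1/8 of the query length
    print(full / 2**30, "GiB |", sliced / 2**20, "MiB")   # 2.0 GiB | 256.0 MiB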
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 22:12:00] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 148.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
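The iteration summaries show the dynamic loss scaler backing off: every iteration so far reports one skipped iteration, and the loss scale halves (4294967296.0 → 2147483648.0 → 1073741824.0 → ...). In other words, the scaler starts at 2^32, finds inf/nan gradients at that scale, skips the optimizer step, and multiplies the scale by 0.5 before retrying. A minimal sketch of that backoff behaviour (assumed backoff factor and minimum scale; not Megatron's actual scaler class, and scale growth after stable steps is omitted):

    loss_scale = 2.0 ** 32            # 4294967296.0, the scale printed at iteration 1
    backoff, min_scale = 0.5, 1.0
    for _ in range(3):
        found_inf = True              # these early iterations all overflow and are skipped
        if found_inf:
            loss_scale = max(loss_scale * backoff, min_scale)
        print(int(loss_scale))        # 2147483648, 1073741824, 536870912 (iterations 2-4)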
+batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: 
attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after 
cp: position_ids torch.Size([8, 2048]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384])batch tensor after cp: +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) + tokensbatch tensor: torch.Size([8, 2048])labels + torch.Size([8, 16384])batch tensor after cp: +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) + labelsbatch tensor: torch.Size([8, 2048])loss_mask + batch tensor after cp:torch.Size([8, 16384]) +loss_mask torch.Size([8, 2048])batch tensor: + batch tensor after cp:attention_mask attention_mask torch.Size([8, 1, 16384, 16384])torch.Size([8, 1, 2048, 16384]) + +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor:batch tensor after cp: position_idsposition_ids torch.Size([8, 16384])torch.Size([8, 2048]) + +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) 
+batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp:batch tensor: labels torch.Size([8, 2048])tokens +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) + batch tensor after cp: loss_mask torch.Size([8, 16384])torch.Size([8, 2048]) + +batch tensor: labels torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp:batch tensor: attention_masklabels torch.Size([8, 1, 2048, 16384])torch.Size([8, 16384]) + +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp:batch tensor: position_idsloss_mask torch.Size([8, 16384])torch.Size([8, 2048]) + +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +Start exporting trace 3 +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +Done exporting trace 3 +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: 
+ [2025-06-21 22:12:00] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 143.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
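Since the full [8, 1, 16384, 16384] boolean mask is about 2 GiB per rank while the [8, 1, 2048, 16384] slice after cp is 256 MiB, the query-only sharding already gives an 8x reduction in mask memory. A quick check of that arithmetic (assuming the mask is stored as 1-byte bools; this is a side calculation, not output of the run):

full_mask_elems = 8 * 1 * 16384 * 16384    # elements in [8, 1, 16384, 16384]
cp_mask_elems   = 8 * 1 * 2048  * 16384    # elements in [8, 1, 2048, 16384]
print(full_mask_elems / 2**30, "GiB before cp")   # 2.0 GiB per rank
print(cp_mask_elems / 2**20, "MiB after cp")      # 256.0 MiB per rank
print(full_mask_elems // cp_mask_elems)           # 8, one factor per CP rank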
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 22:12:01] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 146.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 22:12:01] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 146.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
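The iteration summaries report one skipped iteration each, with the loss scale halving every time (536870912.0 at iteration 4, 268435456.0 at 5, 134217728.0 at 6, and 67108864.0 at iteration 7 below). That pattern is consistent with standard fp16 dynamic loss scaling, where a gradient overflow causes the optimizer step to be skipped and the scale to be backed off. The snippet below is a minimal sketch of that mechanism under that assumption, not code taken from this training script.

def next_loss_scale(scale, overflow, backoff=2.0):
    """Return (new_scale, step_was_skipped) for one iteration."""
    if overflow:
        return scale / backoff, True      # skip the optimizer step, shrink the scale
    return scale, False                   # (a growth rule would also live here)

scale = 536870912.0                       # value reported at iteration 4
for it in range(4, 8):
    print(f"iteration {it}: loss scale {scale:.1f} (step skipped)")
    scale, _ = next_loss_scale(scale, overflow=True)
# reproduces 536870912.0, 268435456.0, 134217728.0, 67108864.0 for iterations 4-7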
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 22:12:01] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 142.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: batch tensor after cp:attention_mask tokenstorch.Size([8, 1, 2048, 16384]) +batch tensor after cp:torch.Size([8, 2048]) +position_ids batch tensor after cp:torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: 
position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: tokens 
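The paired dumps above show what context parallelism does to each batch: the full sequence is [8, 16384], and with CP_SIZE=8 every context-parallel rank keeps a 16384/8 = 2048-token slice, so tokens/labels/loss_mask/position_ids shrink to [8, 2048] while the attention mask keeps its full 16384-key dimension ([8, 1, 2048, 16384]). A minimal sketch of that slicing follows; the helper name is hypothetical and uses one contiguous slice, whereas Megatron-LM's real implementation assigns interleaved chunks for load balancing.

import torch

def slice_batch_for_cp(batch, cp_rank, cp_size):
    # Hypothetical helper: keep one contiguous 1/cp_size slice of the sequence
    # dimension on this context-parallel rank.
    seq_len = batch["tokens"].size(1)
    chunk = seq_len // cp_size
    sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
    out = {}
    for key, t in batch.items():
        if key == "attention_mask":
            out[key] = t[:, :, sl, :]   # [b, 1, seq, seq] -> [b, 1, seq/cp, seq]
        else:
            out[key] = t[:, sl]         # [b, seq] -> [b, seq/cp]
    return out

# Scaled-down demo (the run above uses batch 8, seq 16384, cp_size 8):
batch = {
    "tokens": torch.zeros(2, 64, dtype=torch.long),
    "labels": torch.zeros(2, 64, dtype=torch.long),
    "loss_mask": torch.ones(2, 64),
    "attention_mask": torch.ones(2, 1, 64, 64, dtype=torch.bool),
    "position_ids": torch.arange(64).repeat(2, 1),
}
local = slice_batch_for_cp(batch, cp_rank=0, cp_size=8)
print(local["tokens"].shape)          # torch.Size([2, 8])
print(local["attention_mask"].shape)  # torch.Size([2, 1, 8, 64])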
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 22:12:01] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 142.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
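Note the loss scale in the iteration lines: 67108864.0 at iteration 7, 33554432.0 at iteration 8, and it keeps halving while every step reports "number of skipped iterations: 1". That is the standard fp16 dynamic loss-scaling backoff: a gradient overflow is detected, the optimizer step is skipped, and the scale is cut in half. A toy sketch of that policy follows; the class and its constants are illustrative, not Megatron-LM's grad scaler.

class ToyDynamicLossScaler:
    # Halve the scale on overflow (and skip the step); grow it again after a
    # long streak of overflow-free steps. Constants are illustrative only.
    def __init__(self, initial_scale, backoff=0.5, growth=2.0,
                 growth_interval=1000, min_scale=1.0):
        self.scale = initial_scale
        self.backoff = backoff
        self.growth = growth
        self.growth_interval = growth_interval
        self.min_scale = min_scale
        self._good_steps = 0

    def update(self, found_overflow):
        # Returns True if the optimizer step should be skipped.
        if found_overflow:
            self.scale = max(self.scale * self.backoff, self.min_scale)
            self._good_steps = 0
            return True
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth
        return False

scaler = ToyDynamicLossScaler(initial_scale=2.0**27)
for it in range(7, 11):                  # every iteration in this log overflows
    skipped = scaler.update(found_overflow=True)
    print(it, skipped, scaler.scale)     # 7 True 67108864.0 ... 10 True 8388608.0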
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 22:12:01] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 142.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
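Each iteration is bracketed by "Start exporting trace N" / "Done exporting trace N", i.e. a profiler trace is written out per step. The log does not show which profiler this run uses, so the sketch below, based on torch.profiler and a hypothetical trace_<N>.json output path, is only one common way to produce such per-iteration traces.

import torch
from torch.profiler import profile, ProfilerActivity

def train_step():
    # stand-in for the real forward/backward/optimizer work
    a = torch.randn(512, 512)
    return (a @ a).sum()

for iteration in range(6, 10):
    with profile(activities=[ProfilerActivity.CPU]) as prof:
        train_step()
    print(f"Start exporting trace {iteration}")
    prof.export_chrome_trace(f"trace_{iteration}.json")  # hypothetical file name
    print(f"Done exporting trace {iteration}")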
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 22:12:01] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 141.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 22:12:01 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.03644514083862305 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.036690711975097656 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.036957740783691406 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.037282705307006836 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.03732442855834961 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03732442855834961 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.03785347938537598 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.038224220275878906 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.0395505428314209 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.0395662784576416 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.039601802825927734 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.03961610794067383 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.039679527282714844 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.039725542068481445 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.03975939750671387 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.04013514518737793 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.04011392593383789 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.03998279571533203 to prepare state dict for ckpt 
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.040451765060424805 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.04021310806274414 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.04029130935668945 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.04031825065612793 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.04251909255981445 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.04268503189086914 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.05240988731384277 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.052426815032958984 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.05249285697937012 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.05250215530395508 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0529475212097168 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.05326509475708008 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.059604644775390625 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.059736013412475586 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), 
(np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(100663296), 2), (np.int64(100663296), 3), (np.int64(92480512), 4), (np.int64(96468992), 5), (np.int64(96468992), 6), (np.int64(92480512), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 
1.203897476196289 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1990370750427246 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1989538669586182 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1991307735443115 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2051024436950684 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1991782188415527 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1992101669311523 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.199448585510254 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1992835998535156 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2038626670837402 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2232654094696045 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1927695274353027 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1927850246429443 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1928887367248535 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1928668022155762 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2053196430206299 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2232639789581299 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1930291652679443 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1999139785766602 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2041864395141602 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2232952117919922 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1931707859039307 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2041993141174316 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.223478078842163 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2174804210662842 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.223677635192871 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.1942870616912842 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2063755989074707 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2237989902496338 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.224226474761963 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.01827216148376465 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2296478748321533 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed 
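[editor note] The repeated "verifying reuse of global metadata" / "loaded global metadata reuse verification: no loaded plans passed" lines suggest each rank first checks whether a previously computed global save plan can be reused before building a new one; since no loaded plans were passed here, planning is redone from scratch. A hedged sketch of that caching pattern follows (hypothetical names, not the megatron.core API).

from dataclasses import dataclass
from typing import Optional

@dataclass
class GlobalPlan:
    layout_key: str   # fingerprint of the sharded state-dict layout
    write_items: list # per-rank write descriptors

_cached_plan: Optional[GlobalPlan] = None

def get_global_plan(layout_key, build_plan):
    """Reuse the cached plan when the layout matches, otherwise rebuild it."""
    global _cached_plan
    if _cached_plan is not None and _cached_plan.layout_key == layout_key:
        return _cached_plan                    # verified reuse: metadata matches
    _cached_plan = build_plan(layout_key)      # "no loaded plans passed": fresh plan
    return _cached_plan

if __name__ == "__main__":
    plan = get_global_plan("tp4_cp8_pp1", lambda key: GlobalPlan(key, []))
    print(plan.layout_key)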
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.01567363739013672 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.015768051147460938 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.016332149505615234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.015645265579223633 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.014677047729492188 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.01586437225341797 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.016256093978881836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.012249469757080078 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.016559839248657227 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2685528 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.014783620834350586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.01549530029296875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2678761 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2678823 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.011919021606445312 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.016685009002685547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2694378 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.015880346298217773 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.016474246978759766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.016093015670776367 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2685585 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.015418529510498047 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2692552 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.015901565551757812 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.01648426055908203 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2694647 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2685585 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2685618 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2692547 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.014016151428222656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.011844158172607422 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.01665496826171875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.014846086502075195 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2692714 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.267914 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2694774 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2694771 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.269487 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2685835 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.016375064849853516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 4.9591064453125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.008148193359375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.269508 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.268587 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2692964 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.016139984130859375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2693176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.267927 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.016666889190673828 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2686064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.437301635742188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.008148193359375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.914138793945312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.914138793945312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.677078247070312e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.015391826629638672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2693422 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.888938903808594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2679393 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.698204040527344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.016672611236572266 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.175041198730469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.581710815429688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2679412 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.747245788574219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.817413330078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.269548 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.176399230957031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2695549 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.059906005859375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011110305786132812 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.009506225585938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.698204040527344e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.031990051269531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.936622619628906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.915496826171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.198883056640625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.608268737792969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2693706 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.016193389892578125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.127357482910156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.5789947509765625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.015037059783935547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.341934204101562e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2685125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2688553 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.295608520507812e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010275840759277344 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.535385131835938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.128715515136719e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.004000663757324219 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.269783 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00015115737915039062 
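[editor note] The "thread_count: 2" and "bucket_prep" lines come from the async filesystem writer setting up per-thread work. The helper below is only a guess at what bucket preparation involves: splitting one rank's write items into thread_count buckets with roughly even byte totals. It is a sketch, not Megatron's code.

def prepare_buckets(items, thread_count=2):
    """items: list of (name, size_bytes). Returns thread_count lists of names."""
    buckets = [[] for _ in range(thread_count)]
    totals = [0] * thread_count
    for name, size in sorted(items, key=lambda kv: -kv[1]):
        i = totals.index(min(totals))   # least-loaded bucket so far
        buckets[i].append(name)
        totals[i] += size
    return buckets

if __name__ == "__main__":
    print(prepare_buckets([("w1", 100), ("w2", 60), ("w3", 50), ("w4", 40)]))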
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.01595449447631836 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543923.2726512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1021575927734375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.046991825103759766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3159888 rank: 22, write(async) time: 0.047403812408447266 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04720616340637207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3161573 rank: 20, write(async) time: 0.047597408294677734 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0476839542388916 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3166726 rank: 21, write(async) time: 0.04807472229003906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0496823787689209 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3186312 rank: 18, write(async) time: 0.05006742477416992 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04995012283325195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3189034 rank: 17, write(async) time: 0.05034589767456055 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05017733573913574 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.320015 rank: 15, write(async) time: 0.05057549476623535 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05021834373474121 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.320123 rank: 13, write(async) time: 0.050612449645996094 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05109548568725586 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0505366325378418 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05043363571166992 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05054736137390137 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051555633544921875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3193755 rank: 5, write(async) time: 0.05149579048156738 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3204143 rank: 14, write(async) time: 0.05093955993652344 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3206081 rank: 16, write(async) time: 0.05199933052062988 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3193924 rank: 7, write(async) time: 0.05088043212890625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3204396 rank: 12, write(async) time: 0.05095958709716797 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05218768119812012 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3221338 rank: 11, write(async) time: 0.0525820255279541 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05261087417602539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3224936 rank: 10, write(async) time: 0.05300450325012207 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05316662788391113 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3230069 rank: 9, write(async) time: 0.05353999137878418 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.055676937103271484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3253672 rank: 29, write(async) time: 0.05611014366149902 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05581021308898926 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05577373504638672 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3254836 rank: 31, write(async) time: 0.05621004104614258 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3255043 rank: 28, write(async) time: 0.05624794960021973 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05682039260864258 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3251183 rank: 1, write(async) time: 0.057233572006225586 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.057621002197265625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3275561 rank: 8, write(async) time: 0.05800223350524902 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05762481689453125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3266122 rank: 19, write(async) time: 0.05805778503417969 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.059625864028930664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.329342 rank: 25, write(async) time: 0.06004452705383301 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06040692329406738 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.330196 rank: 24, write(async) time: 0.06085205078125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.061539411544799805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.329863 rank: 4, write(async) time: 0.06193232536315918 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0597379207611084 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.332782 rank: 0, write(async) time: 0.06013226509094238 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06610655784606934 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.334465 rank: 3, write(async) time: 0.06654715538024902 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06955814361572266 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.338866 rank: 23, write(async) time: 0.07000851631164551 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0716853141784668 
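[editor note] The "D2H and push" timings followed by "write(async)" timings show the two-stage pattern of asynchronous checkpointing: tensors are first copied from device to host, then a background process performs the slow file write while training continues. A minimal, self-contained sketch of that pattern, assuming invented helper names rather than megatron.core's FileSystemWriterAsync:

import multiprocessing as mp
import time
import torch

def _write_worker(host_state, path):
    torch.save(host_state, path)               # runs in the background process

def async_save(state_dict, path):
    t0 = time.time()
    # D2H: detach and copy every tensor to host memory before forking.
    host_state = {k: v.detach().to("cpu") for k, v in state_dict.items()}
    proc = mp.Process(target=_write_worker, args=(host_state, path))
    proc.start()                                # "write(async)": returns immediately
    print(f"D2H and push, time: {time.time() - t0:.4f}")
    return proc                                 # caller joins it later

if __name__ == "__main__":
    p = async_save({"w": torch.randn(1024, 1024)}, "/tmp/ckpt_demo.pt")
    p.join()                                    # "joining self.process"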
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.3419366 rank: 27, write(async) time: 0.07216072082519531 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 2.3126602172851562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 2.4080276489257812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 3.409385681152344e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 2.002716064453125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 2.193450927734375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 2.0742416381835938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.6450881958007812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.4066696166992188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 1.52587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 2.5272369384765625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 4.458427429199219e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 2.3126602172851562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.0313870906829834 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.031951189041137695 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.030620813369750977 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.031058073043823242 to schedule async ckpt 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.028084516525268555 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.03194308280944824 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.03228282928466797 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.03124690055847168 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.03316211700439453 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.0330500602722168 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.03078150749206543 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.03375720977783203 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.0323939323425293 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.030738353729248047 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.030133962631225586 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.0341489315032959 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.03512239456176758 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.02862262725830078 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.037900447845458984 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.03590250015258789 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.031841278076171875 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03195524215698242 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.040025949478149414 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.03253054618835449 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.04696822166442871 to schedule async ckpt 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 2.4318695068359375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.09808349609375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.0351109504699707 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.20726656913757324 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.477002 rank: 26, write(async) time: 0.20768237113952637 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.20774030685424805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.4775994 rank: 30, write(async) time: 0.20822620391845703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.04593539237976074 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.04222512245178223 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22085632, before: 1745752064, after: 1767837696 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22319104, before: 1746808832, after: 1769127936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22245376, before: 1761476608, after: 1783721984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.24400663375854492 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.5124586 rank: 2, write(async) time: 0.24451828002929688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22278144, before: 1758519296, after: 1780797440 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22159360, before: 1776377856, after: 1798537216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 34283520, before: 1785901056, after: 1820184576 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.03330659866333008 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.041439056396484375 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30584832, before: 1854058496, after: 1884643328 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.2800025939941406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543923.5483859 rank: 6, write(async) time: 0.28044652938842773 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51351552, before: 1771315200, after: 1822666752 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 200704, before: 1745383424, after: 1745584128 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22163456, before: 1786425344, after: 1808588800 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 55676928, before: 1781190656, after: 1836867584 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 55382016, before: 1782112256, after: 1837494272 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51515392, before: 1765265408, after: 1816780800 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51404800, before: 1743810560, after: 1795215360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30257152, before: 1765109760, after: 1795366912 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47210496, before: 1765265408, after: 1812475904 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47280128, before: 1743810560, after: 1791090688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 38633472, before: 1775013888, after: 1813647360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72032256, before: 1785901056, after: 1857933312 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30347264, before: 1761873920, after: 1792221184 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51232768, before: 1770409984, after: 1821642752 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47288320, before: 1771315200, after: 1818603520 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47476736, before: 1781125120, after: 1828601856 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30175232, before: 1740914688, after: 1771089920 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30322688, before: 1768562688, after: 1798885376 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72282112, before: 1745752064, after: 1818034176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30502912, before: 1766154240, after: 1796657152 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 262144, before: 2060632064, after: 2060894208 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30470144, before: 1772634112, after: 1803104256 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72445952, before: 1761476608, after: 1833922560 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 2.2411346435546875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51138560, before: 1782112256, after: 1833250816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72364032, before: 1776377856, after: 1848741888 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72503296, before: 1746808832, after: 1819312128 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46960640, before: 1770487808, after: 1817448448 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.4080276489257812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72491008, before: 1758519296, after: 1831010304 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72200192, before: 1854058496, after: 1926258688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 38563840, before: 1798504448, after: 1837068288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
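[editor note] The "N consumed: X, before: Y, after: Z" lines are per-worker memory accounting around the write: in each case X equals Z minus Y (e.g. 1767837696 - 1745752064 = 22085632), consistent with a resident-set-size reading taken before and after the write. A small sketch of reproducing that measurement with psutil; the interpretation is an assumption, not taken from the Megatron source.

import os
import psutil

def rss():
    return psutil.Process(os.getpid()).memory_info().rss

def write_with_memory_accounting(worker_id, payload, path):
    before = rss()
    with open(path, "wb") as f:
        f.write(payload)
    after = rss()
    print(f"{worker_id} consumed: {after - before}, before: {before}, after: {after}")

if __name__ == "__main__":
    write_with_memory_accounting(0, b"x" * (1 << 20), "/tmp/mem_demo.bin")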
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6153698, rank: 20, write(sync,parallel): 0.2232358455657959 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6225467, rank: 31, write(sync,parallel): 0.23485541343688965 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6230834, rank: 29, write(sync,parallel): 0.2343451976776123 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6243477, rank: 25, write(sync,parallel): 0.23160743713378906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.625167, rank: 28, write(sync,parallel): 0.2358243465423584 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72364032, before: 1786425344, after: 1858789376 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.03301405906677246 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6301591, rank: 21, write(sync,parallel): 0.23955297470092773 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6329513, rank: 17, write(sync,parallel): 0.2412090301513672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72265728, before: 1768566784, after: 1840832512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109064192, before: 1762422784, after: 1871486976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72310784, before: 1775013888, after: 1847324672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72192000, before: 1765122048, after: 1837314048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6450942, rank: 24, write(sync,parallel): 0.24322247505187988 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72052736, before: 1740914688, after: 1812967424 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.646919, rank: 19, write(sync,parallel): 0.24723052978515625 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.04122424125671387 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72208384, before: 
1761898496, after: 1834106880 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.649556, rank: 22, write(sync,parallel): 0.26143813133239746 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6503825, rank: 18, write(sync,parallel): 0.2579929828643799 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6518803, rank: 16, write(sync,parallel): 0.2615644931793213 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72495104, before: 1772634112, after: 1845129216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6572647, rank: 27, write(sync,parallel): 0.2441403865814209 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72527872, before: 1766154240, after: 1838682112 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51408896, before: 1789587456, after: 1840996352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 327680, before: 1759232000, after: 1759559680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72118272, before: 1798504448, after: 1870622720 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108834816, before: 1785782272, after: 1894617088 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6758897, rank: 23, write(sync,parallel): 0.24518895149230957 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.677881, rank: 12, write(sync,parallel): 0.28884148597717285 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6775913, rank: 4, write(sync,parallel): 0.2619445323944092 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.681613, rank: 14, write(sync,parallel): 0.2948267459869385 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6864514, rank: 10, write(sync,parallel): 0.29290008544921875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47214592, before: 1789587456, after: 1836802048 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51191808, before: 1767747584, after: 1818939392 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.6917968, rank: 15, write(sync,parallel): 0.3054234981536865 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.696712, rank: 13, write(sync,parallel): 0.31005406379699707 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.7046409, rank: 9, write(sync,parallel): 0.3148193359375 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46895104, before: 1767747584, after: 1814642688 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.36s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.715132, rank: 11, write(sync,parallel): 0.3228178024291992 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.7215085, rank: 8, write(sync,parallel): 0.3268430233001709 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.7230408, rank: 26, write(sync,parallel): 0.1858980655670166 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.7239895, rank: 5, write(sync,parallel): 0.3182704448699951 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1757519872, after: 1757655040 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.26s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.7578695, rank: 30, write(sync,parallel): 0.20813608169555664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108986368, before: 1760980992, after: 1869967360 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.8168302, rank: 7, write(sync,parallel): 0.41071343421936035 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.50s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109219840, before: 1758089216, after: 1867309056 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker 
results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543923.936894, rank: 6, write(sync,parallel): 0.3041067123413086 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212340736, before: 1745383424, after: 1957724160 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 211918848, before: 2060632064, after: 2272550912 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543924.041281, rank: 3, write(sync,parallel): 0.560006856918335 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.66s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543924.096557, rank: 0, write(sync,parallel): 0.5937769412994385 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212430848, before: 1759232000, after: 1971662848 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.70s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543924.164687, rank: 1, write(sync,parallel): 0.6974005699157715 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.78s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212312064, before: 1757519872, after: 1969831936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543924.2600925, rank: 2, write(sync,parallel): 0.6089470386505127 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.70s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3118553, 1, gather: 0.10636758804321289 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3118646, 2, gather: 0.002628326416015625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3137515, 10, gather: 0.577852725982666 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3137465, 9, gather: 0.560999870300293 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3139215, 8, gather: 0.5440819263458252 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.313336, 16, gather: 0.6208882331848145 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3140197, 14, gather: 0.5825185775756836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.31403, 12, gather: 0.5987272262573242 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.31402, 15, gather: 0.5782935619354248 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3140368, 11, gather: 0.5558130741119385 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.313535, 21, gather: 0.6402883529663086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3135521, 20, gather: 0.6617081165313721 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3140657, 13, gather: 0.571885347366333 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3135555, 23, gather: 0.5916001796722412 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3135757, 22, gather: 0.619457483291626 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3147883, 25, gather: 0.6360940933227539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.314814, 26, gather: 0.5564785003662109 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3139284, 4, gather: 0.5977687835693359 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3139517, 6, gather: 0.3362407684326172 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3136814, 17, gather: 0.6368443965911865 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3149066, 28, gather: 0.6345088481903076 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3139253, 3, gather: 0.22158098220825195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3139446, 18, gather: 0.6238129138946533 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.31515, 29, gather: 0.6385505199432373 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3139746, 7, gather: 0.4400913715362549 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3140776, 19, gather: 0.6233551502227783 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.315226, 24, gather: 0.6206851005554199 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3141193, 5, gather: 0.5329504013061523 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.315284, 31, gather: 0.6439759731292725 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3153226, 30, gather: 0.514704704284668 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3154411, 27, gather: 0.6070551872253418 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3167958, 0, gather: 0.16346240043640137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543924.3305736, metadata_write: 0.013621091842651367 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1802s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6167s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1276s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6415s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6570s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6440s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6818s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6430s 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6400s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6543s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5759s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6580s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6635s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6558s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2408s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0237s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5925s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5765s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5650s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5986s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5990s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5821s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6192s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6032s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6399s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6608s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6117s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5336s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3555s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5520s +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.0036678314208984375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.003657817840576172 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.0036993026733398438 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.003542184829711914 to finalize ckpt save +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.6262s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4599s +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.003691434860229492 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.0037145614624023438 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.003705739974975586 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.0036706924438476562 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.0036923885345458984 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0036950111389160156 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.0037126541137695312 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.003714323043823242 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.0038137435913085938 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.0037772655487060547 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.003683328628540039 to finalize ckpt save 
+DEBUG:megatron.training.checkpointing:rank: 26, takes 0.003686666488647461 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.003675699234008789 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.003681182861328125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.0036766529083251953 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.0037071704864501953 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.0037000179290771484 to finalize ckpt save + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 23, takes 0.0037996768951416016 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.003767728805541992 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.003509998321533203 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.003682851791381836 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0036797523498535156 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.003662586212158203 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.0036895275115966797 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.00366973876953125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0036351680755615234 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.003670930862426758 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.003588438034057617 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) 
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+[each rank prints the same batch shapes; the per-rank lines are interleaved across the 32 ranks in the raw output:]
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
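+The "after cp" shapes above show the context-parallel slicing: with CP_SIZE=8 each rank keeps 16384/8 = 2048 sequence positions of tokens/labels/loss_mask/position_ids, while the attention mask keeps its full key dimension ([8, 1, 16384, 16384] -> [8, 1, 2048, 16384]). The sketch below reproduces only those shapes under the simplifying assumption of one contiguous chunk per CP rank; it is illustrative and is not Megatron's actual (load-balanced) CP split.
+
+    import torch
+
+    def shard_batch_for_cp(batch: dict, cp_rank: int, cp_size: int) -> dict:
+        # Hypothetical helper, for illustration only: slice the sequence dimension.
+        seq_len = batch["tokens"].shape[1]                  # 16384 in this run
+        chunk = seq_len // cp_size                          # 16384 // 8 = 2048
+        sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
+        out = {k: batch[k][:, sl] for k in ("tokens", "labels", "loss_mask", "position_ids")}
+        # queries become local to this rank, keys/values stay global:
+        out["attention_mask"] = batch["attention_mask"][:, :, sl, :]
+        return out
+
+    # shape check with micro-batch 1 (the run above uses micro-batch 8):
+    demo = {
+        "tokens": torch.zeros(1, 16384, dtype=torch.long),
+        "labels": torch.zeros(1, 16384, dtype=torch.long),
+        "loss_mask": torch.ones(1, 16384),
+        "position_ids": torch.arange(16384).unsqueeze(0),
+        "attention_mask": torch.ones(1, 1, 16384, 16384, dtype=torch.bool),
+    }
+    local = shard_batch_for_cp(demo, cp_rank=0, cp_size=8)
+    assert local["tokens"].shape == (1, 2048)
+    assert local["attention_mask"].shape == (1, 1, 2048, 16384)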
+Start exporting trace 10
+Done exporting trace 10
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+    evaluate .......................................: (3081.63, 3082.58)
+----------------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on validation set | lm loss value: 1.103783E+01 | lm loss PPL: 6.218235E+04 |
+----------------------------------------------------------------------------------------------------------------
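+The PPL in the line above is simply exp() of the reported lm loss (mean token cross-entropy in nats); a quick check with the rounded values from the log:
+
+    import math
+    lm_loss = 1.103783e+01              # "lm loss value" reported above
+    print(f"{math.exp(lm_loss):.4e}")   # ~6.218e+04, matching "lm loss PPL" up to display rounding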
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+Evaluating on 1 samples
+Evaluating iter 1/1
+[each rank prints the same batch shapes; the per-rank lines are interleaved across the 32 ranks in the raw output:]
+batch tensor: tokens torch.Size([8, 16384])
+batch tensor: labels torch.Size([8, 16384])
+batch tensor: loss_mask torch.Size([8, 16384])
+batch tensor: attention_mask torch.Size([8, 1, 16384, 16384])
+batch tensor: position_ids torch.Size([8, 16384])
+batch tensor after cp: tokens torch.Size([8, 2048])
+batch tensor after cp: labels torch.Size([8, 2048])
+batch tensor after cp: loss_mask torch.Size([8, 2048])
+batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384])
+batch tensor after cp: position_ids torch.Size([8, 2048])
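+At these shapes the materialized attention mask is a non-trivial allocation on its own; a rough estimate, assuming a 1-byte boolean mask (the dtype is an assumption, the log only shows shapes):
+
+    batch, heads, s_q, s_k = 8, 1, 16384, 16384          # logged full-mask shape [8, 1, 16384, 16384]
+    full_bytes = batch * heads * s_q * s_k               # 2,147,483,648 B = 2.0 GiB per micro-batch
+    after_cp_bytes = full_bytes // 8                     # [8, 1, 2048, 16384] with CP_SIZE=8 -> 256 MiB
+    print(full_bytes / 2**30, "GiB,", after_cp_bytes / 2**20, "MiB")   # 2.0 GiB, 256.0 MiB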
after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor: tokens torch.Size([8, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +batch tensor: labels torch.Size([8, 16384]) +batch tensor: loss_mask torch.Size([8, 16384]) +batch tensor: attention_mask torch.Size([8, 1, 16384, 16384]) +batch tensor: position_ids torch.Size([8, 16384]) +batch tensor after cp: tokens torch.Size([8, 2048]) +batch tensor after cp: labels torch.Size([8, 2048]) +batch tensor after cp: loss_mask torch.Size([8, 2048]) +(min, max) time across ranks (ms): + evaluate .......................................: (78.66, 83.27) +batch tensor after cp: attention_mask torch.Size([8, 1, 2048, 16384]) +batch tensor after cp: position_ids torch.Size([8, 2048]) +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.103783E+01 | lm loss PPL: 6.218235E+04 | +---------------------------------------------------------------------------------------------------------- +Start exporting trace 11 +Done exporting trace 11 +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Running ctx_length=4096, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +-------------------------------- +-------------------------------- 
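A minimal sketch (illustration only, not output from this run) of how the per-rank shapes printed above follow from CP_SIZE=8, plus a check that the reported lm loss PPL is exp(lm loss). The helper cp_shapes is hypothetical; Megatron's actual context-parallel slicing gives each rank non-contiguous chunks for load balancing, but the resulting per-rank sizes are the same.

import math

def cp_shapes(batch_size, seq_len, cp_size):
    # Each CP rank keeps seq_len // cp_size of the query/sequence dimension;
    # the attention mask keeps the full key length, so only one axis shrinks.
    sub = seq_len // cp_size
    return {
        "tokens/labels/loss_mask/position_ids": (batch_size, sub),
        "attention_mask": (batch_size, 1, sub, seq_len),
    }

print(cp_shapes(8, 16384, 8))  # (8, 2048) and (8, 1, 2048, 16384), as printed above
print(cp_shapes(8, 32768, 8))  # (8, 4096) and (8, 1, 4096, 32768), as in the next run

# "lm loss value: 1.103783E+01 | lm loss PPL: 6.218235E+04" above:
print(math.exp(11.03783))      # ~6.2182e+04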
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 4096 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 
0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. 
None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... 
False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 
60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. 
True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... 
None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ 
[] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. 
None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... 
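The "> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line above follows from make_vocab_size_divisible_by=128 and tensor_model_parallel_size=4. A small sketch of that rounding (illustration only; padded_vocab_size is a hypothetical helper written here, not the Megatron function):

def padded_vocab_size(orig_size, make_vocab_size_divisible_by, tp_size):
    # Round the vocabulary up to a multiple of divisible_by * TP size so the
    # word-embedding table shards evenly across tensor-parallel ranks.
    multiple = make_vocab_size_divisible_by * tp_size
    return ((orig_size + multiple - 1) // multiple) * multiple

new_size = padded_vocab_size(50257, 128, 4)
print(new_size, new_size - 50257)  # 50688 431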
+INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.053 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 6.621 seconds +time to initialize megatron (seconds): 12.772 +[after megatron is initialized] datetime: 2025-06-21 22:12:51 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 
156830720 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 156830720 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 156830720 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (156830720 elements, 156830720 padded size): + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.final_layernorm.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + 
module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.07, 3.72) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:12:51 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
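A back-of-the-envelope check (my own arithmetic, not Megatron's accounting code) that the per-rank count of 156830720 parameters reported above is consistent with the logged config: hidden_size 4096, ffn_hidden_size 16384, 2 layers, 64 heads with 16 query groups and kv_channels 64, learned position embeddings of length 4096, padded vocab 50688, tied output layer, biases on linears and LayerNorms, TP=4.

h, ffn, layers, tp = 4096, 16384, 2, 4
heads, groups, kv = 64, 16, 64
vocab, max_pos = 50688, 4096

qkv_out = heads * kv + 2 * groups * kv            # fused QKV output dim = 6144
per_layer = (
    (h * qkv_out + qkv_out) // tp                 # column-parallel QKV weight + bias
    + (h * h) // tp + h                           # row-parallel attention proj (+ full bias)
    + (h * ffn + ffn) // tp                       # column-parallel MLP fc1 weight + bias
    + (ffn * h) // tp + h                         # row-parallel MLP fc2 (+ full bias)
    + 4 * h                                       # two LayerNorms (weight + bias each)
)
total = (
    (vocab // tp) * h                             # sharded word embeddings (output layer tied)
    + max_pos * h                                 # learned position embeddings (replicated)
    + layers * per_layer
    + 2 * h                                       # final LayerNorm
)
print(total)                                      # 156830720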
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=4096, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005605 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16648 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002194 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16640 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002096 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16671 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:12:52 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (440.92, 488.04) + train/valid/test-data-iterators-setup ..........: (19.89, 165.65) +Setting rerun_state_machine.current_iteration to 0... 
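The split_matrix logged above is just the cumulative normalization of the even "1,1,1" split. A small sketch reproducing it (split_matrix here is an illustrative reimplementation, not the Megatron helper):

def split_matrix(weights):
    # Normalize the split weights and pair consecutive cumulative bounds.
    total = sum(weights)
    cum = [sum(weights[:i]) / total for i in range(len(weights) + 1)]
    return list(zip(cum[:-1], cum[1:]))

print(split_matrix([1, 1, 1]))
# [(0.0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]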
+[before the start of training step] datetime: 2025-06-21 22:12:52 +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch 
tensor: position_ids torch.Size([8, 32768])
[all 32 ranks print the same batch tensor shapes before and after the context-parallel (cp) split:]
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 0
+Done exporting trace 0
+ [2025-06-21 22:13:04] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 12939.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
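The shape change in the printout above is the context-parallel split at work: an 8-way split keeps a 32768 / 8 = 4096-token slice of the sequence dimension on each rank, while the attention mask keeps the full 32768-token key dimension ([8, 1, 32768, 32768] -> [8, 1, 4096, 32768]). A minimal sketch of that kind of slicing, assuming a simple contiguous split (Megatron's actual CP slicing is load-balanced and differs in detail; the helper name is hypothetical):

    import torch

    def slice_for_cp(batch, cp_size, cp_rank):
        # Keep only this CP rank's contiguous chunk of the sequence dimension.
        seq_len = batch["tokens"].size(1)           # 32768 in this run
        chunk = seq_len // cp_size                  # 32768 // 8 = 4096
        sl = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
        out = {}
        for name, t in batch.items():
            if name == "attention_mask":
                out[name] = t[:, :, sl, :]          # query dim sliced -> [8, 1, 4096, 32768]
            else:
                out[name] = t[:, sl]                # -> [8, 4096]
        return out

    # e.g. a toy batch with shapes [2, 16] and mask [2, 1, 16, 16], cp_size=4,
    # comes out as [2, 4] and [2, 1, 4, 16] on each rank.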
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18664.0 | max reserved: 18664.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18600.0 | max reserved: 18600.0
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 19130.0 | max reserved: 19130.0
+[Rank 25] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18906.0 | max reserved: 18906.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18878.0 | max reserved: 18878.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18906.0 | max reserved: 18906.0
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18780.0 | max reserved: 18780.0
+[Rank 17] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18780.0 | max reserved: 18780.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18716.0 | max reserved: 18716.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18720.0 | max reserved: 18720.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18976.0 | max reserved: 18976.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18972.0 | max reserved: 18972.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18972.0 | max reserved: 18972.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18886.0 | max reserved: 18886.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 19130.0 | max reserved: 19130.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 19102.0 | max reserved: 19102.0
+[Rank 18] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18780.0 | max reserved: 18780.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18976.0 | max reserved: 18976.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18630.0 | max reserved: 18630.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18630.0 | max reserved: 18630.0
+[Rank 30] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 19102.0 | max reserved: 19102.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18782.0 | max reserved: 18782.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18780.0 | max reserved: 18780.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18976.0 | max reserved: 18976.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18600.0 | max reserved: 18600.0
+[Rank 29] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 19102.0 | max reserved: 19102.0
+[Rank 23] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18782.0 | max reserved: 18782.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18716.0 | max reserved: 18716.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18600.0 | max reserved: 18600.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18782.0 | max reserved: 18782.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18782.0 | max reserved: 18782.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 11168.53564453125 | max allocated: 17412.96923828125 | reserved: 18630.0 | max reserved: 18630.0
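The per-rank memory lines above report the CUDA caching-allocator counters. A sketch of how such a line can be produced with standard PyTorch calls (illustrative only; the reporting code in the training script may differ):

    import torch

    def report_memory(rank: int, iteration: int) -> None:
        mb = 1024.0 * 1024.0  # bytes -> MB
        print(f"[Rank {rank}] (after {iteration} iterations) memory (MB) | "
              f"allocated: {torch.cuda.memory_allocated() / mb} | "
              f"max allocated: {torch.cuda.max_memory_allocated() / mb} | "
              f"reserved: {torch.cuda.memory_reserved() / mb} | "
              f"max reserved: {torch.cuda.max_memory_reserved() / mb}")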
[all 32 ranks again print the same pre- and post-cp batch tensor shapes:]
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 22:13:05] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 534.5 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
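The loss scale drops from 4294967296.0 (2^32) at iteration 1 to 2147483648.0 (2^31) here, and each iteration is counted as skipped: the step hit a gradient overflow, so the dynamic loss scaler skips the update and halves the scale, which is the standard backoff behaviour. A minimal sketch of that rule (illustrative, not the scaler actually used by the training script; defaults are hypothetical):

    class DynamicLossScaler:
        # Halve the scale on overflow, grow it again after a run of clean steps.
        def __init__(self, initial_scale=2.0 ** 32, backoff=0.5,
                     growth=2.0, growth_interval=1000):
            self.scale = initial_scale
            self.backoff = backoff
            self.growth = growth
            self.growth_interval = growth_interval
            self._good_steps = 0

        def update(self, found_overflow: bool) -> None:
            if found_overflow:
                self.scale *= self.backoff      # 2**32 -> 2**31 -> 2**30 -> ... as in this log
                self._good_steps = 0
            else:
                self._good_steps += 1
                if self._good_steps % self.growth_interval == 0:
                    self.scale *= self.growth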
[all 32 ranks again print the same pre- and post-cp batch tensor shapes:]
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 22:13:05] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 448.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
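Note how large the full attention mask in these printouts is: assuming a boolean mask at 1 byte per element (an assumption, not something the log states), [8, 1, 32768, 32768] works out to 8 GiB per rank before the context-parallel slice, versus 1 GiB for the sliced [8, 1, 4096, 32768] mask. A quick check of that arithmetic:

    import math

    def mask_gib(shape, bytes_per_element=1):
        # bytes_per_element=1 assumes a bool mask
        return math.prod(shape) * bytes_per_element / 2 ** 30

    print(mask_gib((8, 1, 32768, 32768)))   # 8.0 GiB: full mask
    print(mask_gib((8, 1, 4096, 32768)))    # 1.0 GiB: per-rank mask after the cp split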
after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: 
loss_mask torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask 
torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
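The shape dump above shows what the context-parallel ("cp") split does to each batch: every rank starts from the full batch (tokens, labels, loss_mask and position_ids of shape [8, 32768], plus an attention mask of [8, 1, 32768, 32768]) and keeps a 4096-token slice, one eighth of the 32768-token sequence, along the query dimension, while the mask keeps the full 32768-long key dimension. A minimal sketch of that slicing, assuming a simple contiguous split per rank rather than Megatron's actual load-balanced context-parallel sharding:

    def slice_for_cp(batch, cp_size=8, cp_rank=0):
        # Illustrative sketch only: keep this CP rank's contiguous chunk of the
        # sequence dimension. batch is a dict of torch.Tensors with the shapes
        # printed above; the real Megatron split typically interleaves chunks to
        # balance causal-attention work, but the resulting shapes are the same
        # as the "after cp" lines in the log.
        seq_len = batch["tokens"].size(1)        # 32768
        chunk = seq_len // cp_size               # 4096
        s = slice(cp_rank * chunk, (cp_rank + 1) * chunk)
        return {
            "tokens": batch["tokens"][:, s],                 # [8, 4096]
            "labels": batch["labels"][:, s],                 # [8, 4096]
            "loss_mask": batch["loss_mask"][:, s],           # [8, 4096]
            "position_ids": batch["position_ids"][:, s],     # [8, 4096]
            # query dim sliced, key dim kept full -> [8, 1, 4096, 32768]
            "attention_mask": batch["attention_mask"][:, :, s, :],
        }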
+Start exporting trace 3
+Done exporting trace 3
+ [2025-06-21 22:13:06] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 433.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
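The summary line above reports a skipped iteration and a loss scale of 536870912 = 2**29; the following summaries show it halving again to 2**28, 2**27 and 2**26 as further iterations are skipped. That is the usual fp16 dynamic loss-scaling policy: on overflow the optimizer step is skipped and the scale is halved, and after a window of clean steps it is doubled again. A minimal sketch of such a scaler (illustrative, not Megatron's exact implementation; the initial scale and growth interval here are assumptions):

    class DynamicLossScaler:
        # Halve the scale on overflow, double it after a clean window of steps.
        def __init__(self, init_scale=2.0**32, growth_interval=1000):
            self.scale = init_scale
            self.growth_interval = growth_interval
            self._good_steps = 0

        def update(self, found_overflow: bool) -> bool:
            # Returns True if this iteration's optimizer step should be skipped.
            if found_overflow:
                self.scale = max(self.scale / 2.0, 1.0)
                self._good_steps = 0
                return True
            self._good_steps += 1
            if self._good_steps % self.growth_interval == 0:
                self.scale *= 2.0
            return False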
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 22:13:06] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 434.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
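Each iteration is bracketed by a "Start exporting trace N" / "Done exporting trace N" pair. The log does not say which profiler produces these traces; assuming a torch.profiler-based setup, a per-iteration export could look roughly like this hypothetical helper:

    from torch.profiler import profile, ProfilerActivity

    def run_profiled_step(step_fn, iteration, out_dir="traces"):
        # Hypothetical sketch: profile one training step and export a Chrome
        # trace named after the iteration, mirroring the Start/Done messages.
        with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
            step_fn()
        print(f"Start exporting trace {iteration}")
        prof.export_chrome_trace(f"{out_dir}/trace_{iteration}.json")
        print(f"Done exporting trace {iteration}")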
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 22:13:07] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 426.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
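Since every per-iteration summary follows the same "key: value |" layout, pulling the timing and loss-scale numbers out of this log for plotting is a one-regex job. A small, hypothetical parser for lines of exactly this shape:

    import re

    LINE_RE = re.compile(
        r"iteration\s+(\d+)/\s*(\d+)\s*\|.*?"
        r"elapsed time per iteration \(ms\): ([\d.]+).*?"
        r"loss scale: ([\d.]+).*?"
        r"number of skipped iterations: (\d+)"
    )

    def parse_iteration_line(line):
        # Returns (iteration, total_iters, ms_per_iter, loss_scale, skipped) or None.
        m = LINE_RE.search(line)
        if not m:
            return None
        it, total, ms, scale, skipped = m.groups()
        return int(it), int(total), float(ms), float(scale), int(skipped)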
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 22:13:07] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 448.4 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
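One practical reason for slicing the mask along the query dimension is memory: a full boolean attention mask of shape [8, 1, 32768, 32768] holds 8 * 32768 * 32768 = 8,589,934,592 elements, roughly 8 GiB at one byte per element, whereas the per-rank [8, 1, 4096, 32768] slice is one eighth of that. A quick back-of-the-envelope check (assuming a 1-byte bool mask; the actual dtype is not shown in the log):

    def mask_bytes(batch, heads, q_len, k_len, bytes_per_elem=1):
        # Size in bytes of an attention mask of shape [batch, heads, q_len, k_len].
        return batch * heads * q_len * k_len * bytes_per_elem

    full_mask = mask_bytes(8, 1, 32768, 32768)   # 8_589_934_592 B ~= 8.0 GiB
    cp_mask   = mask_bytes(8, 1, 4096, 32768)    # 1_073_741_824 B ~= 1.0 GiB
    print(full_mask / 2**30, cp_mask / 2**30)    # 8.0 1.0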
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 22:13:08] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 431.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
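Across these summaries the loss scale halves at every logged iteration (67108864 -> 33554432 here, continuing to 16777216 and 8388608 below) while "number of skipped iterations: 1" is reported each time, i.e. every step hit inf/nan gradients, was skipped, and the dynamic loss scaler backed off. The following is a generic sketch of that update rule, not Megatron-LM's actual grad-scaler class; the hyperparameter names and defaults are assumptions.

class DynamicLossScaler:
    """Halve the scale on overflow; grow it back after a streak of clean steps."""
    def __init__(self, initial_scale=67108864.0, backoff_factor=0.5,
                 growth_factor=2.0, growth_interval=1000, min_scale=1.0):
        self.scale = initial_scale
        self.backoff_factor = backoff_factor
        self.growth_factor = growth_factor
        self.growth_interval = growth_interval
        self.min_scale = min_scale
        self._clean_steps = 0

    def update(self, found_inf):
        if found_inf:
            # Skip this iteration's optimizer step and reduce the scale.
            self.scale = max(self.scale * self.backoff_factor, self.min_scale)
            self._clean_steps = 0
            return False  # counts as a "skipped iteration"
        self._clean_steps += 1
        if self._clean_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return True

scaler = DynamicLossScaler()
for _ in range(3):
    scaler.update(found_inf=True)
    print(scaler.scale)  # 33554432.0, 16777216.0, 8388608.0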
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 22:13:08] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 481.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
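Each iteration is bracketed by "Start exporting trace N" / "Done exporting trace N". The log does not show how the trace is produced; as a hedged illustration only, a per-step Chrome trace can be dumped with torch.profiler roughly like this (the step function and output file name are assumptions):

import torch
from torch.profiler import profile, ProfilerActivity

def run_one_iteration():
    # Stand-in for one training step's forward/backward work.
    w = torch.randn(128, 128, requires_grad=True)
    loss = (w @ w).sum()
    loss.backward()

with profile(activities=[ProfilerActivity.CPU]) as prof:
    run_one_iteration()

prof.export_chrome_trace("trace_9.json")  # assumed file name, one trace per iteration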
+batch tensor: tokens torch.Size([8, 32768])
+batch tensor: labels torch.Size([8, 32768])
+batch tensor: loss_mask torch.Size([8, 32768])
+batch tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
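For scale, the attention_mask shapes printed above work out as follows, assuming a 1-byte torch.bool mask (the log does not print the dtype):

full_mask_bytes = 8 * 1 * 32768 * 32768   # batch tensor: attention_mask [8, 1, 32768, 32768]
cp_mask_bytes = 8 * 1 * 4096 * 32768      # batch tensor after cp: attention_mask [8, 1, 4096, 32768]
print(full_mask_bytes / 2**30, "GiB")     # 8.0 GiB per microbatch before the context-parallel slice
print(cp_mask_bytes / 2**30, "GiB")       # 1.0 GiB per CP rank afterwards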
+Start exporting trace 9
+Done exporting trace 9
+ [2025-06-21 22:13:09] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 423.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+[after training is done] datetime: 2025-06-21 22:13:09
+saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format
+DEBUG:megatron.training.checkpointing:rank: 25, takes 0.033547401428222656 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 13, takes 0.03355264663696289 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 14, takes 0.03351330757141113 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 11, takes 0.03350234031677246 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 31, takes 0.033582448959350586 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 18, takes 0.03389382362365723 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.03388833999633789 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 21, takes 0.033919334411621094 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 19, takes 0.03390336036682129 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 9, takes 0.033638715744018555 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03362107276916504 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 7, takes 0.03434896469116211 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 26, takes 0.0335993766784668 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 27, takes 0.03362441062927246 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 30, takes 0.03364300727844238 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 16, takes 0.034295082092285156 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 10, takes 0.033728599548339844 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 6, takes 0.03438115119934082 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 1, takes 0.03438282012939453 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 2, takes 0.03439044952392578 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 3, takes 0.03439188003540039 to prepare state dict for ckpt
+DEBUG:megatron.training.checkpointing:rank: 5, takes 0.03439688682556152 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.03380417823791504 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.03493976593017578 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.03396272659301758 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.034841299057006836 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.0341181755065918 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.03480815887451172 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0349123477935791 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.034485816955566406 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.040657997131347656 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.0425419807434082 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)]
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(109051904), 2), (np.int64(109051904), 3), (np.int64(100869120), 4), (np.int64(104857600), 5), (np.int64(104857600), 6), (np.int64(100869120), 7)]
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.209597110748291
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2781736850738525
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.218111276626587
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2098267078399658 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.27872896194458 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2161931991577148 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2099146842956543 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2789721488952637 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2782423496246338 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2487821578979492 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2502262592315674 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2504286766052246 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2497427463531494 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2207975387573242 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2434537410736084 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2101609706878662 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.279374122619629 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2505321502685547 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2435619831085205 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2103519439697266 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2798550128936768 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2761814594268799 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2165062427520752 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2179181575775146 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2162277698516846 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2106373310089111 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2797479629516602 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2514326572418213 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2801642417907715 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.2534799575805664 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 1.211883783340454 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.018325328826904297 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, 
starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.009426116943359375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.008489847183227539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.009124755859375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.008444786071777344 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.008517026901245117 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.009465217590332031 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.009031057357788086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.009262323379516602 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.543593 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.0034732818603515625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.008811473846435547 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.007741689682006836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.008533239364624023 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.0074193477630615234 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.009105443954467773 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.009342193603515625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.0038094520568847656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.008970975875854492 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.00826573371887207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.0072057247161865234 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5450268 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5442936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5443077 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.545239 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5452392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5452409 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5436287 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.54363 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5436332 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.543634 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5436363 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.269050598144531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.008137702941894531 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5450363 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.008810043334960938 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.006361484527587891 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5443141 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.008973121643066406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5452495 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.1021575927734375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.006838560104370117 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5450513 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5450578 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5443215 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.009239912033081055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.009014606475830078 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.246566772460938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.984306335449219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5450602 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.007626533508300781 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5443473 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.54434 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.604194641113281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.8650970458984375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5453017 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.413459777832031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.198883056640625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.7220458984375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.0025238990783691406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.545069 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.545071 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5450795 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.556510925292969e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.437301635742188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.544382 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.545306 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5437799 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.91278076171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.866455078125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.559226989746094e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.413459777832031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.0070459842681884766 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.008148193359375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.389617919921875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.771087646484375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.437301635742188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.368492126464844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.937980651855469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.937980651855469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010085105895996094 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.00947880744934082 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.344650268554688e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5444467 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5455444 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.843971252441406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.772445678710938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.389617919921875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.009946107864379883 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.678436279296875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5456407 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.009455442428588867 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.842613220214844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543990.5484667 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.340576171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05130481719970703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05164933204650879 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5961142 rank: 21, write(async) time: 0.0517880916595459 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5971358 rank: 28, write(async) time: 0.0520784854888916 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0515446662902832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05182790756225586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5964744 rank: 22, write(async) time: 0.05213117599487305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0524592399597168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.051952362060546875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.597859 rank: 24, write(async) time: 0.05282282829284668 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.596631 rank: 18, write(async) time: 0.05228376388549805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5967185 rank: 23, write(async) time: 0.05242156982421875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.053185462951660156 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0528874397277832 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.598674 rank: 31, write(async) time: 0.05361795425415039 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5976703 rank: 16, write(async) time: 0.05329132080078125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05379676818847656 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05442023277282715 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5994883 rank: 10, write(async) time: 0.054247140884399414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.5992172 rank: 17, write(async) time: 0.05490398406982422 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05413055419921875 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.599867 rank: 11, write(async) time: 0.054566383361816406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05414915084838867 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6002774 rank: 13, write(async) time: 0.054633140563964844 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05516362190246582 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6008625 rank: 15, write(async) time: 0.05562257766723633 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05618929862976074 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.056034088134765625 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.60175 rank: 29, write(async) time: 0.05666947364807129 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.601781 rank: 14, write(async) time: 0.05654120445251465 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05671119689941406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6022387 rank: 25, write(async) time: 0.05716872215270996 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05676126480102539 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05725550651550293 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6024816 rank: 12, write(async) time: 0.05723118782043457 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6020458 rank: 19, write(async) time: 0.05773568153381348 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05737757682800293 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05761885643005371 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6030738 rank: 8, write(async) time: 0.05776619911193848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05781412124633789 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6031291 rank: 27, write(async) time: 0.05805706977844238 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6019208 rank: 1, write(async) time: 0.05828595161437988 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05863666534423828 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.059162139892578125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.604651 rank: 9, write(async) time: 0.05910205841064453 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6031814 rank: 4, write(async) time: 0.05955362319946289 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06015467643737793 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6041903 rank: 5, write(async) time: 0.06059432029724121 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06038975715637207 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6045 rank: 3, write(async) time: 0.06087231636047363 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06263399124145508 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6066716 rank: 7, write(async) time: 0.06303572654724121 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.7404556274414062e-05 to finish D2H 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06574892997741699 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6106505 rank: 20, write(async) time: 0.06620359420776367 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 9.632110595703125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 2.002716064453125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06545066833496094 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.6144145 rank: 0, write(async) time: 0.0659480094909668 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.6689300537109375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.5497207641601562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.7404556274414062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.621246337890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.6927719116210938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.02952432632446289 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 3.314018249511719e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.02965569496154785 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.031755685806274414 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.03949689865112305 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.03347063064575195 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.04267597198486328 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.03808093070983887 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.03628683090209961 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.03383803367614746 to schedule async ckpt 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.03729438781738281 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.03645491600036621 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.0386347770690918 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.03646492958068848 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 2.4557113647460938e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 2.0265579223632812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 2.1696090698242188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.040978431701660156 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.03913617134094238 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.032347679138183594 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.03166651725769043 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.04796266555786133 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.03237271308898926 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.04334831237792969 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.039902687072753906 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.0410158634185791 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03415226936340332 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.03604269027709961 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.03625988960266113 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 4.00543212890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 4.9591064453125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.16744446754455566 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.712956 rank: 30, write(async) time: 0.1678941249847412 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21987328, before: 1908506624, after: 1930493952 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.2118229866027832 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.7573316 rank: 26, write(async) time: 0.21230626106262207 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.05675339698791504 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.057245492935180664 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22216704, before: 1916022784, after: 1938239488 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22315008, before: 1911717888, after: 1934032896 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22331392, before: 1858236416, after: 1880567808 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.044396162033081055 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.03269052505493164 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22249472, before: 1931345920, after: 1953595392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22171648, before: 1891553280, after: 1913724928 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 43028480, before: 1979056128, after: 2022084608 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30134272, before: 1889816576, after: 1919950848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30511104, before: 1880547328, after: 1911058432 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30482432, before: 1834172416, after: 1864654848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 50278400, before: 1889202176, after: 1939480576 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30208000, before: 1928814592, after: 1959022592 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.03159070014953613 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30416896, before: 1864880128, after: 1895297024 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72241152, before: 1916022784, after: 1988263936 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30187520, before: 1900564480, after: 1930752000 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 135168, before: 1877303296, after: 1877438464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 39006208, before: 1966379008, after: 2005385216 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72216576, before: 1908432896, after: 1980649472 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72286208, before: 1979056128, after: 2051342336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47296512, before: 1889202176, after: 1936498688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51503104, before: 1878421504, after: 1929924608 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 286720, before: 2113302528, after: 2113589248 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72429568, before: 1966379008, after: 2038808576 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 55795712, before: 1866117120, after: 1921912832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72359936, before: 1891569664, after: 1963929600 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 50110464, before: 1915047936, after: 1965158400 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.3122432231903076 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.856354 rank: 6, write(async) time: 0.31272077560424805 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47300608, before: 1878421504, after: 1925722112 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46768128, before: 1915047936, after: 1961816064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55799808, before: 1866117120, after: 1921916928 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47329280, before: 1859387392, after: 1906716672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51347456, before: 1859469312, after: 1910816768 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.8678515, rank: 17, write(sync,parallel): 0.22195076942443848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72282112, before: 1928814592, after: 2001096704 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47214592, before: 1890557952, after: 1937772544 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.8742833, rank: 21, write(sync,parallel): 0.23367094993591309 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46342144, before: 1879965696, after: 1926307840 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72527872, before: 1931345920, after: 2003873792 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72511488, before: 1834192896, after: 1906704384 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.336315393447876 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.880733, rank: 24, write(sync,parallel): 0.223555326461792 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72196096, before: 1900564480, after: 1972760576 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543990.8806388 rank: 2, write(async) time: 0.33685755729675293 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.895332, rank: 16, write(sync,parallel): 0.24227523803710938 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.8845918, rank: 31, write(sync,parallel): 0.21513795852661133 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72269824, before: 1879965696, after: 1952235520 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72433664, before: 1868804096, after: 1941237760 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9010346, rank: 19, write(sync,parallel): 0.2458357810974121 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9150534, rank: 10, write(sync,parallel): 0.25556135177612305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9008408, rank: 28, write(sync,parallel): 0.22775769233703613 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72347648, before: 1911721984, after: 1984069632 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9012196, rank: 29, write(sync,parallel): 0.2258286476135254 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72531968, before: 1858236416, after: 1930768384 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9184332, rank: 13, write(sync,parallel): 0.26048779487609863 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72462336, before: 1864880128, after: 1937342464 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9107776, rank: 25, write(sync,parallel): 0.2267603874206543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72462336, before: 1880547328, after: 1953009664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9236667, rank: 23, write(sync,parallel): 0.269589900970459 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 42991616, before: 1868820480, after: 1911812096 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72253440, before: 1890557952, after: 1962811392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9312215, rank: 27, write(sync,parallel): 0.24782752990722656 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72134656, before: 1889832960, after: 1961967616 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51236864, before: 1876905984, after: 1928142848 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109207552, before: 1836408832, after: 1945616384 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.938997, rank: 14, write(sync,parallel): 0.2793703079223633 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 200704, before: 1892966400, after: 1893167104 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109023232, before: 1912758272, after: 2021781504 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.04172968864440918 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9494271, rank: 8, write(sync,parallel): 0.28311610221862793 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9538329, rank: 18, write(sync,parallel): 0.3031008243560791 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47042560, before: 1876905984, after: 1923948544 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51412992, before: 1829150720, after: 1880563712 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9614518, rank: 22, write(sync,parallel): 0.31923913955688477 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.96745, rank: 11, write(sync,parallel): 0.3087284564971924 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9712272, rank: 20, write(sync,parallel): 0.2891688346862793 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.971523, rank: 9, write(sync,parallel): 0.3062703609466553 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.0265579223632812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9844327, rank: 15, write(sync,parallel): 0.32617902755737305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47218688, before: 1829150720, after: 1876369408 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9853892, rank: 12, write(sync,parallel): 0.31034326553344727 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9896235, rank: 5, write(sync,parallel): 0.29494333267211914 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9926412, rank: 4, write(sync,parallel): 0.29595279693603516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543990.9959207, rank: 30, write(sync,parallel): 0.21322846412658691 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.034203529357910156 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process 
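The entries above trace the asynchronous checkpoint save: each rank first stages its shard from device to host ("D2H and push"), a forked writer process then performs the actual file write ("write(sync,parallel)") while training continues, and the trainer later joins that process ("joining self.process", "TemporalAsyncCaller: Async process join finished"). Below is a minimal, self-contained sketch of that pattern; it is not Megatron's FileSystemWriterAsync/TemporalAsyncCaller implementation, and the helper names, state dict, and file path (async_save, _write_shard, shard_rank0.pt) are made up for illustration.

import time
import multiprocessing as mp

import torch


def _write_shard(path: str, host_state: dict) -> None:
    # Runs in the child process: the "write(sync,parallel)" phase in the log above.
    torch.save(host_state, path)


def async_save(state: dict, path: str) -> mp.Process:
    t0 = time.time()
    # D2H staging: copy tensors to CPU so the device can keep training immediately.
    host_state = {k: v.detach().cpu().clone() if torch.is_tensor(v) else v
                  for k, v in state.items()}
    proc = mp.Process(target=_write_shard, args=(path, host_state))
    proc.start()  # schedule the write in the background
    print(f"D2H and push, time: {time.time() - t0:.4f}")
    return proc


if __name__ == "__main__":
    writer = async_save({"weight": torch.randn(1024, 1024)}, "shard_rank0.pt")
    # ... training iterations would run here ...
    writer.join()  # the "joining self.process" step before finalization

The payoff of this split is visible in the timings: only the D2H staging (roughly 0.31-0.34 s in the entries above) blocks the training loop, while the 0.2-0.6 s per-rank file writes overlap with subsequent work.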
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from 
forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.41s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.0216355, rank: 26, write(sync,parallel): 0.1934492588043213 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.38s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.26s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 131072, before: 1840324608, after: 1840455680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108920832, before: 1946382336, after: 2055303168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.1539075, rank: 7, write(sync,parallel): 0.45784664154052734 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.54s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212295680, before: 1877303296, after: 2089598976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.262585, rank: 3, write(sync,parallel): 0.4991421699523926 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109178880, before: 1923477504, after: 2032656384 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212299776, before: 2113302528, after: 2325602304 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.60s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.3172863, rank: 6, write(sync,parallel): 0.36698079109191895 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212283392, before: 1892966400, after: 2105249792 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.3287988, rank: 0, write(sync,parallel): 0.5473275184631348 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.46s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.373512, rank: 1, write(sync,parallel): 0.6093471050262451 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.66s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.71s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212414464, before: 1840324608, after: 2052739072 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543991.4767876, rank: 2, write(sync,parallel): 0.4613771438598633 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.54s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5225592, 2, gather: 0.0024483203887939453 
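The per-rank "gather" timings around this point and the single "metadata_write" that follows suggest the finalization step: every rank reports the outcome of its local write, rank 0 collects those reports, and rank 0 writes one metadata record for the whole checkpoint. A minimal sketch of that collect-then-write pattern is below, using torch.distributed.gather_object over the gloo backend; the function name, shard/metadata file names, port, and result payload are hypothetical, and the real strategy in megatron.core.dist_checkpointing is more involved.

import json

import torch.distributed as dist
import torch.multiprocessing as mp


def finalize(rank: int, world_size: int) -> None:
    dist.init_process_group(
        "gloo", init_method="tcp://127.0.0.1:29511", rank=rank, world_size=world_size
    )
    # Each rank's summary of what it wrote (hypothetical payload).
    local_result = {"rank": rank, "file": f"shard_rank{rank}.pt", "bytes": 1000 + rank}

    gathered = [None] * world_size if rank == 0 else None
    dist.gather_object(local_result, gathered, dst=0)  # the per-rank "gather" step

    if rank == 0:
        # The "metadata_write" step: one global index describing all shards.
        with open("checkpoint_metadata.json", "w") as f:
            json.dump(gathered, f)
    dist.destroy_process_group()


if __name__ == "__main__":
    mp.spawn(finalize, args=(4,), nprocs=4)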
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5228271, 1, gather: 0.10767626762390137 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5244339, 9, gather: 0.5068395137786865 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5245218, 12, gather: 0.4911501407623291 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5245273, 11, gather: 0.5066993236541748 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5246854, 13, gather: 0.5071773529052734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5241516, 16, gather: 0.5079216957092285 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5248399, 8, gather: 0.5078887939453125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5254486, 26, gather: 0.46751832962036133 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5244055, 22, gather: 0.5070254802703857 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.524771, 10, gather: 0.5074052810668945 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.525552, 25, gather: 0.5081253051757812 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.524499, 19, gather: 0.5076456069946289 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5250611, 14, gather: 0.5061657428741455 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5246632, 6, gather: 0.16154813766479492 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5256755, 28, gather: 0.5080888271331787 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.524532, 18, gather: 0.5077581405639648 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5253446, 15, gather: 0.49940919876098633 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5247514, 3, gather: 0.21935224533081055 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5258079, 27, gather: 0.5080668926239014 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5246398, 17, gather: 0.5080966949462891 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.524759, 5, gather: 0.4923069477081299 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5258975, 24, gather: 0.5086190700531006 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5245018, 23, gather: 0.5074794292449951 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5247803, 4, gather: 0.48767757415771484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5259702, 30, gather: 0.49245333671569824 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5259817, 31, gather: 0.5082559585571289 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.52474, 20, gather: 0.5082013607025146 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5245879, 21, gather: 0.5077319145202637 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5248692, 7, gather: 0.32402896881103516 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5259876, 29, gather: 
0.5075576305389404 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5275507, 0, gather: 0.13849830627441406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543991.5398362, metadata_write: 0.012118101119995117 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1269s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0218s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1535s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2366s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5048s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1789s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5096s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3415s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4856s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5267s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5263s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5261s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5265s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5264s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5102s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5259s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5268s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5267s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5264s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5106s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5267s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5265s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5266s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5265s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5263s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5265s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5262s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5182s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.0025789737701416016 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.0025725364685058594 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.0025663375854492188 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.0025911331176757812 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.002494335174560547 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.0025832653045654297 to finalize ckpt save +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5265s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5254s +DEBUG:megatron.training.checkpointing:rank: 4, takes 
0.0025527477264404297 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0025434494018554688 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.002538442611694336 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.0025482177734375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0025594234466552734 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002557039260864258 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.0025420188903808594 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.002537250518798828 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.0025904178619384766 to finalize ckpt save +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5262s +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.0026044845581054688 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.0025849342346191406 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.002569437026977539 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.0025625228881835938 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.0025932788848876953 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.0025441646575927734 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.002560138702392578 to finalize ckpt save +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5265s +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.002559185028076172 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.0025687217712402344 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.0025746822357177734 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.002506256103515625 to finalize ckpt save +Evaluating on 1 samples +Evaluating iter 1/1 +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.0026552677154541016 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.002618551254272461 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.002615213394165039 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.002716541290283203 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 23, takes 0.0026824474334716797 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.0026845932006835938 to finalize ckpt save +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) 
+batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 
32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: attention_mask 
torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids 
torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor 
after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +Start exporting trace 10 +Done exporting trace 10 +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (3265.95, 3266.45) +---------------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on validation set | lm loss value: 1.249270E+01 | lm loss PPL: 2.663849E+05 | +---------------------------------------------------------------------------------------------------------------- +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens 
torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768]) +batch tensor after cp: position_ids torch.Size([8, 4096]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor: attention_mask torch.Size([8, 1, 32768, 32768]) +batch tensor: position_ids torch.Size([8, 32768]) +batch tensor: tokens torch.Size([8, 32768]) +batch tensor: labels torch.Size([8, 32768]) +batch tensor: loss_mask torch.Size([8, 32768]) +batch tensor after cp: tokens torch.Size([8, 4096]) +batch tensor after cp: labels torch.Size([8, 4096]) +batch tensor after cp: loss_mask torch.Size([8, 4096]) +batch 
tensor: attention_mask torch.Size([8, 1, 32768, 32768])
+batch tensor: position_ids torch.Size([8, 32768])
+batch tensor after cp: tokens torch.Size([8, 4096])
+batch tensor after cp: labels torch.Size([8, 4096])
+batch tensor after cp: loss_mask torch.Size([8, 4096])
+batch tensor after cp: attention_mask torch.Size([8, 1, 4096, 32768])
+batch tensor after cp: position_ids torch.Size([8, 4096])
+Start exporting trace 11
+Done exporting trace 11
+WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED
+(min, max) time across ranks (ms):
+    evaluate .......................................: (290.30, 292.39)
+----------------------------------------------------------------------------------------------------------
+ validation loss at iteration 10 on test set | lm loss value: 1.249270E+01 | lm loss PPL: 2.663849E+05 |
+----------------------------------------------------------------------------------------------------------
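The shape trace above summarizes what the context-parallel split does to each batch: with CP_SIZE=8, every sequence-dimension tensor shrinks from 32768 to 32768 / 8 = 4096 per rank, while the attention mask keeps its full 32768-wide key dimension ([8, 1, 4096, 32768]). The sketch below reproduces those shapes under the simplifying assumption of one contiguous slice per CP rank; Megatron-LM's actual split typically also interleaves sequence chunks to load-balance causal attention, which changes which tokens a rank owns but not the logged shapes.

# Shape-only sketch (an illustration, not Megatron-LM's implementation).
import torch

def shard_batch_for_cp(batch, cp_size, cp_rank):
    """Slice the sequence dimension down to the shard owned by cp_rank."""
    out = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # [b, 1, S, S] -> [b, 1, S // cp_size, S]: only the query dim is sharded.
            chunk = t.shape[2] // cp_size
            out[name] = t[:, :, cp_rank * chunk:(cp_rank + 1) * chunk, :]
        else:
            # tokens / labels / loss_mask / position_ids: [b, S] -> [b, S // cp_size]
            chunk = t.shape[1] // cp_size
            out[name] = t[:, cp_rank * chunk:(cp_rank + 1) * chunk]
    return out

b, seq, cp = 8, 32768, 8
# "meta" tensors carry only shapes, so the [8, 1, 32768, 32768] mask costs no memory here.
batch = {
    "tokens": torch.empty(b, seq, dtype=torch.long, device="meta"),
    "labels": torch.empty(b, seq, dtype=torch.long, device="meta"),
    "loss_mask": torch.empty(b, seq, device="meta"),
    "attention_mask": torch.empty(b, 1, seq, seq, dtype=torch.bool, device="meta"),
    "position_ids": torch.empty(b, seq, dtype=torch.long, device="meta"),
}
sharded = shard_batch_for_cp(batch, cp_size=cp, cp_rank=0)
assert sharded["tokens"].shape == (8, 4096)                    # matches "after cp"
assert sharded["attention_mask"].shape == (8, 1, 4096, 32768)  # query dim sharded, key dim kept full

As a consistency check on the evaluation line, the reported perplexity is simply the exponential of the loss: exp(12.49270) ≈ 2.663849E+05, matching the logged lm loss PPL.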
+Running ctx_length=8192, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 8192
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking.
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. 
None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 
0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 8192 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 
128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 
0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 8192 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... 
False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 
1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 8192 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... 
None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. 
False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.045 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.687 seconds +time to initialize megatron (seconds): 9.100 +[after megatron is initialized] datetime: 2025-06-21 22:13:53 +building GPT model ... 
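Two numbers in this run can be reproduced from the arguments alone. The padded vocabulary rounds 50257 up to the next multiple of make_vocab_size_divisible_by × TP = 128 × 4 = 512, giving 50688 (431 dummy tokens). The per-rank parameter count reported for every (tensor, pipeline) rank below also follows from the model dimensions (hidden_size 4096, ffn_hidden_size 16384, 2 layers, 64 heads with 16 query groups, kv_channels 64, learned absolute position embeddings of length 8192, tied output weights) divided over TP=4. The back-of-the-envelope check below is a sketch assuming the usual Megatron layout (column-/row-parallel attention and MLP weights, with layer norms and row-parallel-layer biases replicated on every TP rank), not Megatron's own accounting code.

def padded_vocab(vocab_size, divisible_by, tp):
    # Pad the vocab so each tensor-parallel shard gets an equal, aligned slice.
    multiple = divisible_by * tp
    return ((vocab_size + multiple - 1) // multiple) * multiple

V = padded_vocab(50257, divisible_by=128, tp=4)
assert V == 50688  # "> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)"

h, ffn, layers, tp = 4096, 16384, 2, 4
heads, groups, kv_ch, seq = 64, 16, 64, 8192

qkv_out = heads * kv_ch + 2 * groups * kv_ch       # 4096 + 2048 (GQA: 16 KV groups)
per_layer = (
    (h * qkv_out + qkv_out) // tp                  # column-parallel QKV weight + bias
    + (h * h) // tp + h                            # row-parallel attention proj + replicated bias
    + (h * ffn + ffn) // tp                        # column-parallel MLP fc1 weight + bias
    + (ffn * h) // tp + h                          # row-parallel MLP fc2 + replicated bias
    + 4 * h                                        # two LayerNorms (weight + bias), replicated
)
embeddings = (V * h) // tp + seq * h               # TP-split word embeddings + position embeddings
total_per_rank = embeddings + layers * per_layer + 2 * h   # + final LayerNorm
assert total_per_rank == 173_607_936               # matches every "(tensor, pipeline)" rank below

The same arguments also explain the reported world size: 32 = TP(4) × CP(8) × PP(1), leaving a data-parallel size of 1.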
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding 
+>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 173607936 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 173607936 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (173607936 elements, 173607936 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, 
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.08, 3.62) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:13:54 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=8192, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005811 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8324 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001810 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8320 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time 
elapsed: 0.001791 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 8335
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:13:54
+done with setup ...
+training ...
+(min, max) time across ranks (ms):
+    model-and-optimizer-setup ......................: (849.54, 866.07)
+    train/valid/test-data-iterators-setup ..........: (17.90, 152.91)
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:13:54
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens 
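The "batch tensor" / "batch tensor after cp" pairs above (each rank prints the same shapes) show the effect of context parallelism: with CP_SIZE=8, every rank keeps 65536 / 8 = 8192 tokens along the sequence dimension, while the attention mask keeps its full key/value extent. A minimal sketch of that slicing, assuming a simple contiguous split per CP rank (slice_for_cp_rank is a hypothetical helper; Megatron's real sharding may interleave chunks to balance causal attention):

    import torch

    def slice_for_cp_rank(batch, cp_size=8, cp_rank=0):
        """Keep only this CP rank's shard of the sequence dimension."""
        out = {}
        for key, t in batch.items():
            if key == "attention_mask":
                # [B, 1, S, S] -> [B, 1, S/cp_size, S]: queries are sharded, keys stay full.
                out[key] = t.chunk(cp_size, dim=2)[cp_rank]
            else:
                # [B, S] -> [B, S/cp_size]
                out[key] = t.chunk(cp_size, dim=1)[cp_rank]
        return out

    # Scaled-down shapes so the example runs cheaply; the log uses B=8, S=65536,
    # which yields [8, 8192] and [8, 1, 8192, 65536] after slicing.
    B, S, cp = 2, 32, 8
    batch = {
        "tokens": torch.zeros(B, S, dtype=torch.long),
        "labels": torch.zeros(B, S, dtype=torch.long),
        "loss_mask": torch.ones(B, S),
        "attention_mask": torch.ones(B, 1, S, S, dtype=torch.bool),
        "position_ids": torch.arange(S).repeat(B, 1),
    }
    local = slice_for_cp_rank(batch, cp_size=cp, cp_rank=0)
    print(local["tokens"].shape)          # torch.Size([2, 4])
    print(local["attention_mask"].shape)  # torch.Size([2, 1, 4, 32])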
+Start exporting trace 0
+Done exporting trace 0
+ [2025-06-21 22:14:10] iteration        1/      10 | consumed samples: 1 | elapsed time per iteration (ms): 15379.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.1400
+Theoretical memory footprints: weight and optimizer=2403.18 MB
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54968.0 | max reserved: 54968.0
+[Rank 9] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54632.0 | max reserved: 54632.0
+[Rank 13] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55208.0 | max reserved: 55208.0
+[Rank 15] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55208.0 | max reserved: 55208.0
+[Rank 8] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55144.0 | max reserved: 55144.0
+[Rank 12] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54696.0 | max reserved: 54696.0
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54760.0 | max reserved: 54760.0
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54504.0 | max reserved: 54504.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55480.0 | max reserved: 55480.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55144.0 | max reserved: 55144.0
+[Rank 10] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55144.0 | max reserved: 55144.0
+[Rank 17] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54760.0 | max reserved: 54760.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54760.0 | max reserved: 54760.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54504.0 | max reserved: 54504.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54504.0 | max reserved: 54504.0
+[Rank 30] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55524.0 | max reserved: 55524.0
+[Rank 14] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54824.0 | max reserved: 54824.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54888.0 | max reserved: 54888.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54568.0 | max reserved: 54568.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55524.0 | max reserved: 55524.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54888.0 | max reserved: 54888.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54504.0 | max reserved: 54504.0
+[Rank 25] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55480.0 | max reserved: 55480.0
+[Rank 23] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55336.0 | max reserved: 55336.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54568.0 | max reserved: 54568.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55524.0 | max reserved: 55524.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55336.0 | max reserved: 55336.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54568.0 | max reserved: 54568.0
+[Rank 29] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55076.0 | max reserved: 55076.0
+[Rank 18] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54760.0 | max reserved: 54760.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 54568.0 | max reserved: 54568.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 39016.41064453125 | max allocated: 52123.53173828125 | reserved: 55032.0 | max reserved: 55032.0
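The "Theoretical memory footprints" line is consistent with a common mixed-precision Adam accounting of about 18 bytes per parameter applied to the 0.14 B parameters of the most loaded shard, while the per-rank lines report the CUDA caching allocator's view. A rough sketch of both, assuming that accounting and these standard torch.cuda queries (the exact bookkeeping inside Megatron may differ):

    import torch

    # Theoretical weight + optimizer footprint for the most loaded shard,
    # assuming 2 B fp16 weight + 4 B fp32 main weight + 4 B fp32 grad + 8 B Adam moments.
    params_most_loaded = 0.1400e9
    bytes_per_param = 2 + 4 + 4 + 4 + 4            # = 18 bytes/parameter
    print(params_most_loaded * bytes_per_param / 2**20)   # ~2403 MB, close to the reported 2403.18 MB

    # Per-rank runtime numbers as seen by the CUDA caching allocator
    # (report_memory is a hypothetical helper mirroring the log's format).
    def report_memory(rank):
        mb = 2**20
        print(f"[Rank {rank}] memory (MB) | "
              f"allocated: {torch.cuda.memory_allocated() / mb} | "
              f"max allocated: {torch.cuda.max_memory_allocated() / mb} | "
              f"reserved: {torch.cuda.memory_reserved() / mb} | "
              f"max reserved: {torch.cuda.max_memory_reserved() / mb}")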
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 1
+Done exporting trace 1
+ [2025-06-21 22:14:12] iteration        2/      10 | consumed samples: 2 | elapsed time per iteration (ms): 1894.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
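The loss scale drops from 4294967296.0 at iteration 1 to 2147483648.0 at iteration 2 (and halves again at iteration 3 below), and every one of these iterations is reported as skipped: this is the usual dynamic loss-scaling behaviour, where an fp16 gradient overflow causes the update to be skipped and the scale to be backed off. A minimal sketch of that policy; the backoff factor and growth interval here are typical defaults, not values read from this log:

    class DynamicLossScaler:
        """Skip the step and halve the scale on overflow; grow it after a run of good steps."""

        def __init__(self, initial_scale=2.0**32, backoff=0.5,
                     growth_factor=2.0, growth_interval=1000):
            self.scale = initial_scale
            self.backoff = backoff
            self.growth_factor = growth_factor
            self.growth_interval = growth_interval
            self._good_steps = 0

        def update(self, found_overflow: bool) -> bool:
            """Return True if the optimizer step should be skipped."""
            if found_overflow:
                self.scale *= self.backoff
                self._good_steps = 0
                return True
            self._good_steps += 1
            if self._good_steps % self.growth_interval == 0:
                self.scale *= self.growth_factor
            return False

    scaler = DynamicLossScaler()
    for _ in range(3):                  # the first three iterations in this log all overflowed
        scaler.update(found_overflow=True)
    print(scaler.scale)                 # 536870912.0 after three halvings from 2**32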
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
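Materializing these attention masks is expensive at this context length: as byte-sized boolean tensors, the full [8, 1, 65536, 65536] mask is 32 GiB and even the per-CP-rank [8, 1, 8192, 65536] slice is 4 GiB. A quick check of that arithmetic, assuming torch.bool storage (one byte per element):

    B, S, cp_size = 8, 65536, 8
    full_mask_bytes = B * 1 * S * S                  # [8, 1, 65536, 65536] bool
    local_mask_bytes = B * 1 * (S // cp_size) * S    # [8, 1, 8192, 65536] bool
    print(full_mask_bytes / 2**30, "GiB")            # 32.0 GiB
    print(local_mask_bytes / 2**30, "GiB")           # 4.0 GiB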
+Start exporting trace 2
+Done exporting trace 2
+ [2025-06-21 22:14:13] iteration        3/      10 | consumed samples: 3 | elapsed time per iteration (ms): 1857.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
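After the first (warm-up) iteration at 15379.9 ms, iterations settle around 1.86-1.89 s. With a global batch of one 65536-token sequence, that implies on the order of 35 k tokens processed per second across the whole job (note these iterations were skipped by the loss scaler, so this measures wall-clock cost rather than useful optimizer progress). The arithmetic, using iteration 3:

    seq_len, global_batch = 65536, 1
    iter_time_s = 1857.7 / 1000                      # elapsed time per iteration, from the log
    tokens_per_second = seq_len * global_batch / iter_time_s
    print(round(tokens_per_second))                  # ~35278 tokens/s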
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 22:14:15] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 1855.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: 
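The "batch tensor" / "batch tensor after cp" pairs above show what the context-parallel split does to each field: with CP_SIZE=8, every rank keeps 65536 / 8 = 8192 positions of the 65536-token sequence, and the attention mask is sliced only along the query dimension ([8, 1, 8192, 65536]) while the key dimension stays full length. The sketch below illustrates that kind of slicing under the simplifying assumption of one contiguous chunk per rank; the helper name slice_batch_for_cp_rank and the even split are illustrative only, not Megatron-LM's actual get_batch_on_this_cp_rank logic, which may partition the sequence differently (for example to balance causal attention work across ranks).

```python
import torch

# Minimal sketch, assuming one contiguous sequence chunk per CP rank.
# slice_batch_for_cp_rank is a hypothetical helper, not Megatron-LM's API.
def slice_batch_for_cp_rank(batch, cp_rank, cp_size):
    seq_len = batch["tokens"].size(1)        # 65536 in this run
    chunk = seq_len // cp_size               # 8192 for cp_size=8
    s, e = cp_rank * chunk, (cp_rank + 1) * chunk
    sliced = {}
    for name, t in batch.items():
        if name == "attention_mask":
            # [b, 1, seq, seq] -> [b, 1, chunk, seq]: only the query rows for this
            # rank are kept, keys stay full length, matching torch.Size([8, 1, 8192, 65536]).
            sliced[name] = t[:, :, s:e, :]
        else:
            # tokens / labels / loss_mask / position_ids: [b, seq] -> [b, chunk]
            sliced[name] = t[:, s:e]
    return sliced

# Scaled-down usage example (seq_len=64 instead of 65536, so it runs anywhere).
b, seq, cp = 2, 64, 8
batch = {
    "tokens": torch.zeros(b, seq, dtype=torch.long),
    "labels": torch.zeros(b, seq, dtype=torch.long),
    "loss_mask": torch.ones(b, seq),
    "attention_mask": torch.tril(torch.ones(seq, seq, dtype=torch.bool)).expand(b, 1, seq, seq),
    "position_ids": torch.arange(seq).expand(b, seq),
}
out = slice_batch_for_cp_rank(batch, cp_rank=0, cp_size=cp)
assert out["tokens"].shape == (b, seq // cp)
assert out["attention_mask"].shape == (b, 1, seq // cp, seq)
```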
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 4
+Done exporting trace 4
+ [2025-06-21 22:14:17] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 1876.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 5
+Done exporting trace 5
+ [2025-06-21 22:14:19] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 1880.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 6
+Done exporting trace 6
+ [2025-06-21 22:14:21] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 1861.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after 
cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch 
tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) 
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 7
+Done exporting trace 7
+ [2025-06-21 22:14:23] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 1863.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
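Every iteration in this run reports number of skipped iterations: 1 and a loss scale that halves from one step to the next (67108864.0 at iteration 7, then 33554432.0, 16777216.0 and 8388608.0): with dynamic fp16 loss scaling, a step whose gradients contain inf/NaN is skipped entirely (no optimizer update) and the scale is backed off, typically by a factor of 2. A minimal sketch of that backoff/growth rule follows; the backoff factor, growth factor and growth interval are generic placeholder values, not this run's actual scaler hyperparameters.

def update_loss_scale(scale, found_inf, good_steps,
                      backoff_factor=0.5, growth_factor=2.0, growth_interval=1000):
    """One step of dynamic loss scaling (illustrative, not Megatron's exact class).

    Returns (new_scale, new_good_steps, skip_step).
    """
    if found_inf:
        # Overflow: skip the optimizer step and shrink the scale.
        return scale * backoff_factor, 0, True
    good_steps += 1
    if good_steps >= growth_interval:
        # Long run of clean steps: try a larger scale again.
        return scale * growth_factor, 0, False
    return scale, good_steps, False

# Repeated overflows give 67108864.0 -> 33554432.0 -> 16777216.0 -> 8388608.0, each step skipped.

A scale that only ever shrinks, as in the iteration lines above, is the usual signature of an fp16 run overflowing on every step.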
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+Start exporting trace 8
+Done exporting trace 8
+ [2025-06-21 22:14:25] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 1875.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor 
after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask 
torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 22:14:27] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 1885.8 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 22:14:27 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.02876877784729004 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.028794288635253906 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.028960466384887695 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.028971195220947266 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.028975725173950195 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0290372371673584 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.029380083084106445 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.030289649963378906 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.0303192138671875 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.03079390525817871 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.031015872955322266 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.03098130226135254 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.030981779098510742 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.03101181983947754 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.03127598762512207 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 8, takes 
0.03155875205993652 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.03166556358337402 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.031673431396484375 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.0315546989440918 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.03160667419433594 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.031982421875 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.03171110153198242 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.03234291076660156 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.03236699104309082 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 23, takes 0.03239107131958008 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.03239774703979492 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.0324711799621582 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.03248453140258789 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.032869577407836914 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.03297924995422363 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.03308558464050293 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.0403897762298584 to prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization 
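The torch_dist checkpoint at iteration 10 is written with the fully parallel save path: each of the 32 ranks first builds its own sharded state dict (the per-rank DEBUG timings above, all around 0.03 s), and the "Apply save parallelization" lines, one per rank, mark the strategy spreading the actual tensor writes across ranks instead of funneling them through a single rank. The snippet below sketches only the timing pattern behind the "takes ... to prepare state dict for ckpt" lines, using plain logging and torch.distributed calls; the model/optimizer state-dict contents and Megatron's real checkpointing entry points are not reproduced here.

import logging
import time

import torch.distributed as dist

logger = logging.getLogger("megatron.training.checkpointing")

def prepare_state_dict_for_ckpt(model, optimizer):
    """Time the per-rank state-dict preparation step (illustrative sketch)."""
    start = time.time()
    state_dict = {
        "model": model.state_dict(),          # Megatron builds a *sharded* state dict here
        "optimizer": optimizer.state_dict(),
    }
    logger.debug("rank: %d, takes %s to prepare state dict for ckpt",
                 dist.get_rank(), time.time() - start)
    return state_dict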
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] 
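Editor's note: the distribute_shards_to_ranks lines above list (shard_size_in_bytes, assigned_rank) pairs, showing the checkpoint shards spread across the 8 writers of a save group so no single rank holds all of the data. The snippet below sketches one way such a size-balanced assignment can be produced (a plain greedy largest-first heuristic); it is an illustration of the idea, not claimed to be Megatron's exact algorithm, and the shard sizes fed to the demo are simply the totals printed in the log, treated as hypothetical inputs.
```python
import heapq

def distribute_shards_to_ranks(shard_sizes, num_ranks):
    # Greedy size balancing: each shard goes to the currently least-loaded rank.
    heap = [(0, rank) for rank in range(num_ranks)]   # (bytes_assigned, rank)
    heapq.heapify(heap)
    assignment = []
    for size in sorted(shard_sizes, reverse=True):    # largest shards first
        load, rank = heapq.heappop(heap)
        assignment.append((size, rank))
        heapq.heappush(heap, (load + size, rank))
    totals = {}
    for size, rank in assignment:
        totals[rank] = totals.get(rank, 0) + size
    return assignment, totals

if __name__ == "__main__":
    # Hypothetical shard sizes taken from the per-rank totals in the log above.
    shards = [207618048, 103809024, 92274688, 92274688,
              83919872, 83919872, 88080384, 88080384]
    _, totals = distribute_shards_to_ranks(shards, num_ranks=8)
    print(totals)
```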
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), 
(np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(103809024), 1), (np.int64(92274688), 2), (np.int64(92274688), 3), (np.int64(83919872), 4), (np.int64(83919872), 5), (np.int64(88080384), 6), (np.int64(88080384), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] 
+DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(207618048), 0), (np.int64(134217728), 1), (np.int64(120586240), 2), (np.int64(117440512), 3), (np.int64(117440512), 4), (np.int64(113452032), 5), (np.int64(113452032), 6), (np.int64(117440512), 7)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4810736179351807 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4178781509399414 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.482896566390991 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.401726007461548 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4078481197357178 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5606601238250732 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5608458518981934 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5615365505218506 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.409656286239624 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4822757244110107 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4022631645202637 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.561006546020508 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4400887489318848 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.556593179702759 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.401926040649414 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5612175464630127 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4179160594940186 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.483549118041992 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5611460208892822 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.419396162033081 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.482177734375 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5617997646331787 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.418362855911255 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5353190898895264 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.402911901473999 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.40293025970459 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.47556209564209 
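Editor's note: every rank logs its own "parallel save sharding, time: ..." value, so the spread of this planning cost across the 32 ranks can be summarized directly from the log text. The helper below is just a log-parsing sketch written against the line format shown above.
```python
import re

PATTERN = re.compile(r"parallel save sharding, time: ([0-9.]+)")

def sharding_time_stats(log_text):
    # Collect the per-rank "parallel save sharding" durations from a log dump.
    times = [float(m.group(1)) for m in PATTERN.finditer(log_text)]
    if not times:
        return None
    return {
        "count": len(times),
        "min_s": min(times),
        "max_s": max(times),
        "mean_s": sum(times) / len(times),
    }

if __name__ == "__main__":
    sample = (
        "DEBUG:...fully_parallel:parallel save sharding, time: 3.4810736179351807\n"
        "DEBUG:...fully_parallel:parallel save sharding, time: 3.401726007461548\n"
    )
    print(sharding_time_stats(sample))  # count=2, min~3.40, max~3.48
```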
+DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.483534336090088 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4031643867492676 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.4033010005950928 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 3.5627171993255615 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded 
plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.017535686492919922 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse 
verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 28, plan time: 0.008551836013793945 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 30, plan time: 0.008284807205200195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 31, plan time: 0.007513284683227539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 26, plan time: 0.007172346115112305 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 13, plan time: 0.007536888122558594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 15, plan time: 0.007181644439697266 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 23, plan time: 0.007864236831665039 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 20, plan time: 0.006858348846435547 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 19, plan time: 0.0077478885650634766 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.008694648742675781 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.006500244140625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.0026383399963378906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0732272 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0732288 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0732288 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 12, plan time: 0.008465766906738281 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 22, plan time: 0.007279634475708008 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.0027260780334472656 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.0064351558685302734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 29, plan time: 0.008229494094848633 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.073421 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 8, plan time: 0.008537054061889648 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0725152 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.007524013519287109 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.002985239028930664 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0732472 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.073423 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 11, plan time: 0.006770610809326172 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 17, plan time: 0.007704973220825195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 21, plan time: 0.007752418518066406 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.072528 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.071775 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 24, plan time: 0.006590843200683594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.073445 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0725312 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 18, plan time: 0.005629062652587891 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0717843 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.071783 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.07327 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0734558 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.072541 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.072555 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0718014 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.071793 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0718012 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0718029 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 25, plan time: 0.005884885787963867 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 10, plan time: 0.007885456085205078 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 14, plan time: 0.007782697677612305 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0725555 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.054473876953125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.079673767089844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.532669067382812e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0732915 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0734634 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 9, plan time: 0.008180856704711914 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 16, plan time: 0.008376121520996094 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.437301635742188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0725758 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.723403930664062e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.890296936035156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.200241088867188e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0733168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.270408630371094e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.271766662597656e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.771087646484375e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010251998901367188 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.890296936035156e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.07349 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.081031799316406e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.07261 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.368492126464844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.724761962890625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.772445678710938e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.605552673339844e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.679794311523438e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0734942 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.073501 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.367134094238281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.152557373046875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010395050048828125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.841255187988281e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010609626770019531 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.462501525878906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010538101196289062 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.462501525878906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 27, plan time: 0.0035233497619628906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 8.320808410644531e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.890296936035156e-05 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.0735593 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.510185241699219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00011396408081054688 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.651878356933594e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.007529258728027344 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750544071.076678 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 4.8160552978515625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04856419563293457 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1222217 rank: 31, write(async) time: 0.04899191856384277 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.048705101013183594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1216524 rank: 22, write(async) time: 0.04911231994628906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04906129837036133 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04893970489501953 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0491487979888916 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.049658775329589844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1227105 rank: 28, write(async) time: 0.0494837760925293 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1227572 rank: 13, write(async) time: 0.04933905601501465 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1220634 rank: 23, write(async) time: 0.049546003341674805 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1218674 rank: 3, write(async) time: 0.05007171630859375 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04916095733642578 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04977822303771973 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1230147 rank: 12, write(async) time: 0.049566030502319336 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1227186 rank: 20, write(async) time: 0.05018877983093262 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05101799964904785 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05108046531677246 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1248398 rank: 15, write(async) time: 0.051416635513305664 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05107903480529785 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.123228 rank: 4, write(async) time: 0.05145120620727539 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1233068 rank: 1, write(async) time: 0.05150151252746582 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 
0.051645517349243164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1238472 rank: 7, write(async) time: 0.05205059051513672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.052541494369506836 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1247852 rank: 5, write(async) time: 0.05297994613647461 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05345749855041504 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1273718 rank: 10, write(async) time: 0.053879737854003906 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05391955375671387 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05388689041137695 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.0539555549621582 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1268537 rank: 19, write(async) time: 0.05431795120239258 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.127876 rank: 9, write(async) time: 0.05437207221984863 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.126877 rank: 17, write(async) time: 0.05431985855102539 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05505180358886719 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 2.5033950805664062e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1280804 rank: 16, write(async) time: 0.05546975135803223 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 2.1457672119140625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.056470632553100586 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1295025 rank: 18, write(async) time: 0.056926727294921875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05657362937927246 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1302528 rank: 29, write(async) time: 0.05698108673095703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05693960189819336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05714225769042969 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1308126 rank: 11, write(async) time: 0.057349443435668945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05718088150024414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1308758 rank: 25, write(async) time: 0.05755901336669922 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05745697021484375 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1301506 rank: 21, write(async) time: 0.05759549140930176 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.059224605560302734 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1313188 rank: 8, write(async) time: 0.057859182357788086 
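Editor's note: the "D2H and push", "write(async)" and "schedule async ckpt" lines trace the asynchronous save flow: tensors are first copied from device to host, and the actual file write is then handed to a background worker so training can continue. The sketch below shows that general pattern with plain torch and threading; it is an illustration of the idea, not Megatron's FileSystemWriterAsync implementation, and the function name is hypothetical.
```python
import threading
import time
import torch

def save_checkpoint_async(state_dict, path):
    # Copy tensors to host (D2H), then write the file in a background thread.
    t0 = time.time()
    host_state = {
        k: v.detach().to("cpu", non_blocking=True) if torch.is_tensor(v) else v
        for k, v in state_dict.items()
    }
    if torch.cuda.is_available():
        torch.cuda.synchronize()          # make sure the D2H copies finished
    print(f"D2H time: {time.time() - t0:.4f}s")

    def _write():
        torch.save(host_state, path)      # background file write

    worker = threading.Thread(target=_write, daemon=True)
    worker.start()                        # "schedule async ckpt"
    return worker                         # join() later to finalize the save

if __name__ == "__main__":
    sd = {"weight": torch.randn(1024, 1024)}
    handle = save_checkpoint_async(sd, "/tmp/toy_ckpt.pt")
    handle.join()
```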
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1329427 rank: 24, write(async) time: 0.059650421142578125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05878090858459473 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.06087374687194824 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.132673 rank: 14, write(async) time: 0.05917978286743164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.1348915 rank: 27, write(async) time: 0.06133270263671875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.057956695556640625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.135074 rank: 0, write(async) time: 0.05839371681213379 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 1.3828277587890625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.03001260757446289 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.030619382858276367 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.03180122375488281 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 1.8358230590820312e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 1.9788742065429688e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 1.8596649169921875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, takes 0.03475785255432129 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 1.7642974853515625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 1.71661376953125e-05 to finish D2H 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 1.7881393432617188e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, takes 0.030618906021118164 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, takes 0.0291750431060791 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 1.4781951904296875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, takes 0.029709339141845703 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, takes 0.029729366302490234 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, takes 0.029285669326782227 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, takes 0.03156232833862305 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, takes 0.031055450439453125 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, takes 0.03143906593322754 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, takes 0.03424978256225586 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, takes 0.030891895294189453 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, takes 0.03671717643737793 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, takes 0.03756427764892578 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, takes 0.0361170768737793 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, takes 0.03159689903259277 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, takes 0.0322575569152832 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, takes 0.03592729568481445 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, takes 0.035902976989746094 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, takes 0.02965235710144043 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, takes 0.03695535659790039 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, takes 0.04146218299865723 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.14331722259521484 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.2169626 rank: 30, write(async) time: 0.14373302459716797 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.14350318908691406 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.217225 rank: 26, write(async) time: 0.14397525787353516 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 2.1457672119140625e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, takes 0.03476738929748535 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 1.811981201171875e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 1.9311904907226562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.03229689598083496 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.03420257568359375 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, takes 0.029657363891601562 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, takes 0.03176403045654297 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22077440, before: 1708773376, after: 1730850816 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.8835067749023438e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 21942272, before: 1751617536, after: 1773559808 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22077440, before: 1706180608, after: 1728258048 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22257664, before: 1705271296, after: 1727528960 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.24002981185913086 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.2401723861694336 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.3122668 rank: 6, write(async) time: 0.24048566818237305 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.3124166 rank: 2, write(async) time: 0.2406313419342041 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 401408, before: 1702215680, after: 1702617088 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 2.0265579223632812e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22069248, before: 1725526016, after: 1747595264 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22278144, before: 1705738240, after: 1728016384 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 22216704, before: 1704755200, after: 1726971904 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30486528, before: 1705832448, after: 1736318976 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30416896, before: 1715261440, after: 1745678336 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.04209613800048828 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30523392, before: 1723404288, after: 1753927680 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.03194475173950195 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30523392, before: 1721593856, after: 1752117248 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51478528, before: 1699196928, after: 1750675456 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30441472, before: 1704747008, after: 1735188480 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 30224384, before: 1716432896, after: 1746657280 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51142656, before: 1694285824, after: 1745428480 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 49881088, before: 1722527744, after: 1772408832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51433472, before: 1706987520, after: 1758420992 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47108096, before: 1699196928, after: 1746305024 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47239168, before: 1706983424, after: 1754222592 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55570432, before: 1696653312, after: 1752223744 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72527872, before: 1705832448, after: 1778360320 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72364032, before: 1715261440, after: 1787625472 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72470528, before: 1723404288, after: 1795874816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72015872, before: 1777618944, after: 1849634816 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72376320, before: 1708675072, after: 1781051392 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47144960, before: 1694285824, after: 1741430784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72224768, before: 1696653312, after: 1768878080 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46800896, before: 1722527744, after: 1769328640 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72486912, before: 1721593856, after: 1794080768 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51589120, before: 1777618944, after: 1829208064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72335360, before: 1708519424, after: 1780854784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 51707904, before: 1712750592, after: 1764458496 
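Note on the `consumed: X, before: Y, after: Z` lines emitted by the forked writer workers above: in every record X equals Z - Y (for example 51707904 = 1764458496 - 1712750592), so `consumed` is simply the memory delta the worker observed around its write. The log does not say what that memory figure measures beyond the arithmetic, so the snippet below is only a hypothetical consistency check against the line format shown here, not Megatron code:

```python
# Hypothetical helper: confirm that every "consumed: X, before: Y, after: Z"
# record in this log satisfies X == Z - Y.
import re

MEM_RE = re.compile(r"consumed: (\d+), before: (\d+), after: (\d+)")

def count_inconsistent(log_path):
    bad = 0
    with open(log_path) as f:
        for line in f:
            for consumed, before, after in MEM_RE.findall(line):
                if int(consumed) != int(after) - int(before):
                    bad += 1
    return bad  # 0 means all memory records are internally consistent
```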
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72503296, before: 1704747008, after: 1777250304 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72130560, before: 1716465664, after: 1788596224 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55603200, before: 1708519424, after: 1764122624 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 2.3126602172851562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 55316480, before: 1759764480, after: 1815080960 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72417280, before: 1706180608, after: 1778597888 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72224768, before: 1725526016, after: 1797750784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 143360, before: 2009014272, after: 2009157632 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72626176, before: 1705738240, after: 1778364416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108994560, before: 1715638272, after: 1824632832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4241312, rank: 13, write(sync,parallel): 0.23044586181640625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4251363, rank: 9, write(sync,parallel): 0.22294878959655762 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72343552, before: 1759748096, after: 1832091648 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72335360, before: 1712750592, after: 1785085952 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72441856, before: 1705271296, after: 1777713152 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.427872, rank: 10, write(sync,parallel): 0.22574901580810547 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 72327168, before: 1704755200, after: 1777082368 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4320996, rank: 31, write(sync,parallel): 0.2382678985595703 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51011584, before: 1696055296, after: 1747066880 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4338183, rank: 23, write(sync,parallel): 0.23366522789001465 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 51400704, before: 1715978240, after: 1767378944 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4393265, rank: 28, write(sync,parallel): 0.23285126686096191 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.440087, rank: 25, write(sync,parallel): 0.23339343070983887 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 108974080, before: 1751617536, after: 1860591616 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 290816, before: 1695780864, after: 1696071680 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 46817280, before: 1696055296, after: 1742872576 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.443739, rank: 14, write(sync,parallel): 0.23759794235229492 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.03236865997314453 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.444674, rank: 27, write(sync,parallel): 0.2254619598388672 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 139386880, before: 1709408256, after: 1848795136 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4458537, rank: 29, write(sync,parallel): 0.23860979080200195 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 24, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 8, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 16, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 25, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 26, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 9, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 17, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 28, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 27, joining self.process 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 10, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 19, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 18, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4474251, rank: 24, write(sync,parallel): 0.2282881736755371 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 20, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 29, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 12, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 11, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 21, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 31, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 13, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 22, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 30, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 15, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 23, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 14, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.449072, rank: 11, write(sync,parallel): 0.24130868911743164 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4505403, rank: 12, write(sync,parallel): 0.24616789817810059 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.45335, rank: 17, write(sync,parallel): 0.23910284042358398 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4567423, rank: 15, write(sync,parallel): 0.2584383487701416 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4570522, rank: 22, write(sync,parallel): 0.25391292572021484 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully 
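Each rank above logs the same sequence: a sub-millisecond D2H hand-off, a few hundredths of a second to schedule the async checkpoint, a `write(sync,parallel)` time in its forked writer (spanning roughly 0.19 s to 0.62 s across the 32 ranks; the slowest appear a few lines further below), and finally a join. A minimal sketch for pulling the per-rank write times out of this log, assuming only the line format visible here (the file name is this log; the function and variable names are made up):

```python
# Aggregate the per-rank "write(sync,parallel)" timings printed by
# filesystem_async above (sketch against the visible log format only).
import re
import statistics

WRITE_RE = re.compile(r"rank: (\d+), write\(sync,parallel\): ([0-9.]+)")

def write_times(log_path):
    """Map rank -> last reported parallel write time in seconds."""
    times = {}
    with open(log_path) as f:
        for line in f:
            for rank, secs in WRITE_RE.findall(line):
                times[int(rank)] = float(secs)
    return times

times = write_times("attnserver.run_attnserver.slurm.sh.343216.out.log")
if times:
    vals = list(times.values())
    print(f"{len(vals)} ranks: min={min(vals):.3f}s "
          f"mean={statistics.mean(vals):.3f}s max={max(vals):.3f}s")
```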
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.29s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4588482, rank: 18, write(sync,parallel): 0.24748444557189941 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 47206400, before: 1715978240, after: 1763184640 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4631872, rank: 19, write(sync,parallel): 0.2533280849456787 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4640925, rank: 20, write(sync,parallel): 0.24709010124206543 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4730005, rank: 16, write(sync,parallel): 0.24292755126953125 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.30s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4736042, rank: 5, write(sync,parallel): 0.30658984184265137 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.475535, rank: 21, write(sync,parallel): 0.2611987590789795 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4786904, rank: 30, write(sync,parallel): 0.19305086135864258 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4870772, rank: 8, write(sync,parallel): 0.3051295280456543 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.48733, rank: 4, write(sync,parallel): 0.32435035705566406
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.4959898, rank: 26, write(sync,parallel): 0.20820403099060059
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.33s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.31s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results...
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 32768, before: 1685716992, after: 1685749760 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.26s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.32s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.34s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.28s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.40s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.39s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109268992, before: 1691553792, after: 1800822784 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 109080576, before: 1707175936, after: 1816256512 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.5884562, rank: 7, write(sync,parallel): 0.42458319664001465 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.6311433, rank: 6, write(sync,parallel): 0.2796604633331299 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.51s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.35s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212496384, before: 1702215680, after: 1914712064 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.7422783, rank: 3, write(sync,parallel): 0.48589611053466797 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212185088, before: 2009014272, after: 2221199360 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.56s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.8167713, rank: 0, write(sync,parallel): 0.4675602912902832 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212267008, before: 1695756288, after: 1908023296 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.56s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 212250624, before: 1685745664, after: 1897996288 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.8854868, rank: 1, write(sync,parallel): 0.6166567802429199 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750544071.918553, rank: 2, write(sync,parallel): 0.4714083671569824 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.71s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.55s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.959679, 2, gather: 0.0024993419647216797 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9597197, 1, gather: 0.023923397064208984 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9615664, 10, gather: 0.49706554412841797 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9615757, 11, gather: 0.47223973274230957 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9615781, 9, gather: 0.5007326602935791 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9616191, 12, gather: 0.46923112869262695 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9610305, 23, gather: 0.4922518730163574 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9610536, 21, gather: 0.4433724880218506 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9616725, 13, gather: 0.5028314590454102 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9610608, 22, gather: 0.46334099769592285 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9610667, 20, gather: 0.46297407150268555 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9617848, 15, gather: 0.46313977241516113 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9611554, 19, gather: 0.46292972564697266 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9622278, 26, gather: 0.43276500701904297 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9618447, 14, gather: 0.4768390655517578 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9612038, 17, gather: 0.47069263458251953 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.961202, 16, gather: 0.4503355026245117 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9612155, 18, gather: 0.4625568389892578 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9610538, 3, gather: 0.17928481101989746 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9622962, 25, gather: 0.4839622974395752 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9620838, 8, gather: 0.42795634269714355 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9612498, 5, gather: 0.43734025955200195 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.96126, 6, gather: 0.28975796699523926 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9612622, 4, gather: 0.4318418502807617 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9623883, 27, gather: 0.4709594249725342 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9624064, 28, gather: 0.48739099502563477 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9613662, 7, gather: 0.3198826313018799 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9626012, 30, gather: 0.44995617866516113 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9626381, 29, gather: 0.4711453914642334 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.962695, 31, gather: 0.47961997985839844 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9628477, 24, gather: 0.4741489887237549 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9642284, 0, gather: 0.1024622917175293 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750544071.9766655, metadata_write: 0.012299776077270508 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4890s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4687s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4811s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0438s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4553s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4495s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1173s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0218s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3376s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.1974s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.3074s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5199s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5218s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4821s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4955s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4466s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4884s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4892s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4507s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4891s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5021s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5053s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4975s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4678s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4918s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4914s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4815s 
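Read together, these checkpoint lines trace the async save path: the training process copies state to host memory and schedules the save (the `D2H` and `schedule async ckpt` lines), forked writer workers perform the parallel file writes, rank 0 gathers the results and writes metadata (~0.012 s here), and each rank then joins its writer and finalizes. The sketch below shows that fork-and-join shape in plain multiprocessing terms; it is not Megatron's API, and every name in it is invented for illustration:

```python
# Illustrative fork-and-join async save, mirroring the pattern suggested by
# the log (D2H copy -> background write -> later join/finalize). Not
# Megatron code; all names here are hypothetical.
import multiprocessing as mp
import torch

def _write_worker(cpu_state, path):
    torch.save(cpu_state, path)  # the "write(sync,parallel)" step

def schedule_async_save(state_dict, path):
    # D2H: move tensors to host memory so training can continue on the GPU.
    cpu_state = {k: (v.detach().cpu() if torch.is_tensor(v) else v)
                 for k, v in state_dict.items()}
    proc = mp.Process(target=_write_worker, args=(cpu_state, path))
    proc.start()                 # corresponds to "forking" in the log
    return proc

def finalize_async_save(proc):
    proc.join()                  # "joining self.process" / "finalize took ..."
```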
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5161s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4819s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4621s +DEBUG:megatron.training.checkpointing:rank: 29, takes 0.0026895999908447266 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 25, takes 0.002642393112182617 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 31, takes 0.002649068832397461 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 30, takes 0.0026717185974121094 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 26, takes 0.002686738967895508 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 28, takes 0.002663135528564453 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 24, takes 0.0026702880859375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 9, takes 0.002660989761352539 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 12, takes 0.002666950225830078 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 14, takes 0.002672910690307617 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 10, takes 0.002597332000732422 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 11, takes 0.002658367156982422 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 15, takes 0.002665281295776367 to finalize ckpt save +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.4822s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.5114s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/4, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 27, takes 0.0026161670684814453 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 8, takes 0.002624988555908203 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 13, takes 0.002693653106689453 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 16, takes 0.002690553665161133 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 17, takes 0.0027387142181396484 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 18, takes 0.0026590824127197266 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.002656221389770508 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0026569366455078125 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.002635955810546875 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 19, takes 0.002721071243286133 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0026624202728271484 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002618551254272461 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.0026068687438964844 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0026183128356933594 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 22, takes 0.002773284912109375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 21, takes 0.00276947021484375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 20, takes 0.002793550491333008 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.002611875534057617 to finalize ckpt save 
+DEBUG:megatron.training.checkpointing:rank: 23, takes 0.0027616024017333984 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels 
torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: 
labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels 
torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) 
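The shape dumps in this evaluation pass, above and below, show context parallelism at work: every rank starts from the full 65536-token batch (`batch tensor`), keeps a 65536 / 8 = 8192-token shard after CP slicing (`batch tensor after cp`), and the attention mask shrinks only along the query dimension, from [8, 1, 65536, 65536] to [8, 1, 8192, 65536]. A minimal sketch of that sharding arithmetic, assuming a plain contiguous split per CP rank (Megatron's actual slicing is more involved, e.g. it splits each sequence into 2*cp_size chunks to balance causal-attention work):

```python
# Sketch only: reproduce the per-CP-rank shapes printed in this log under a
# simple contiguous split. cp_rank is hypothetical; pick any value in range.
import torch

batch, seq_len, cp_size = 8, 65536, 8
shard = seq_len // cp_size                     # 8192 tokens per CP rank

tokens = torch.zeros(batch, seq_len, dtype=torch.long)
cp_rank = 0
tokens_cp = tokens[:, cp_rank * shard:(cp_rank + 1) * shard]

# The mask's query dimension is sharded while its key dimension stays
# full-length, consistent with the [8, 1, 8192, 65536] shape in the log.
# (Shape only; materializing this mask would take ~4 GiB.)
attention_mask_cp_shape = (batch, 1, shard, seq_len)

print(tokens_cp.shape)             # torch.Size([8, 8192])
print(attention_mask_cp_shape)     # (8, 1, 8192, 65536)
```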
+batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 10 +Done exporting trace 10 +WARNING:megatron.core.rerun_state_machine:Setting 
RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (4323.00, 4323.44) +---------------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on validation set | lm loss value: 1.139357E+01 | lm loss PPL: 8.874905E+04 | +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask 
torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask 
torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) 
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536])
+batch tensor after cp: position_ids torch.Size([8, 8192])
+batch tensor: tokens torch.Size([8, 65536])
+batch tensor: labels torch.Size([8, 65536])
+batch tensor: loss_mask torch.Size([8, 65536])
+batch tensor: attention_mask torch.Size([8, 1, 65536, 65536])
+batch tensor: position_ids torch.Size([8, 65536])
+batch tensor after cp: tokens torch.Size([8, 8192])
+batch tensor after cp: labels torch.Size([8, 8192])
+batch tensor after cp: loss_mask torch.Size([8, 8192])
+batch tensor after
cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor: tokens torch.Size([8, 65536]) +batch tensor: labels torch.Size([8, 65536]) +batch tensor: loss_mask torch.Size([8, 65536]) +batch tensor: attention_mask torch.Size([8, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([8, 65536]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 
8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +batch tensor after cp: tokens torch.Size([8, 8192]) +batch tensor after cp: labels torch.Size([8, 8192]) +batch tensor after cp: loss_mask torch.Size([8, 8192]) +batch tensor after cp: attention_mask torch.Size([8, 1, 8192, 65536]) +batch tensor after cp: position_ids torch.Size([8, 8192]) +Start exporting trace 11 +Done exporting trace 11 +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +(min, max) time across ranks (ms): + evaluate .......................................: (1351.81, 1352.23) +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 1.139357E+01 | lm loss PPL: 8.874905E+04 | +---------------------------------------------------------------------------------------------------------- +Running ctx_length=12288, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 4 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 12288 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical 
context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... 
True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 12288 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 
0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 
8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 
8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 12288 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... 
None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... 
False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 12288 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 
4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 
0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.044 seconds +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 4.979 seconds +time to initialize megatron (seconds): 13.006 +[after megatron is initialized] datetime: 2025-06-21 22:15:20 +building GPT model ... 
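[Note] A few of the numbers reported above can be reproduced with a short standalone sketch (plain Python, not Megatron code; the helper padded_vocab_size and the assumption that the vocabulary is padded to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size are ours, for illustration only):

    import math

    def padded_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
        # Assumed padding rule: round up to a multiple of (divisible_by * tp_size).
        multiple = divisible_by * tp_size
        return math.ceil(orig_vocab_size / multiple) * multiple

    # Vocab padding logged above: 50257 -> 50688, i.e. 431 dummy tokens.
    padded = padded_vocab_size(50257, divisible_by=128, tp_size=4)
    assert padded == 50688 and padded - 50257 == 431

    # World size factorization: TP * CP * PP * DP = 4 * 8 * 1 * 1 = 32 ranks.
    assert 4 * 8 * 1 * 1 == 32

    # Context-parallel slicing of the sequence dimension: each of the 8 CP ranks
    # keeps 1/8 of the tokens, e.g. 65536 -> 8192 and 98304 -> 12288 in the shape dumps.
    assert 65536 // 8 == 8192
    assert 98304 // 8 == 12288

    # Perplexity is exp(lm loss): exp(1.139357E+01) ~ 8.8749E+04, as reported.
    print(f"PPL ~ {math.exp(11.39357):.4e}")

The asserts pass and the printed perplexity matches the "lm loss PPL" values reported for the validation and test evaluations in this log.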
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding +>>> embedding +>>> decoder +>>> output_layer +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +>>> embedding > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 + +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model 
parallel rank (3, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 190385152 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 190385152 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 190385152 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (190385152 elements, 190385152 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, 
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (94.92, 95.63) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:15:21 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=12288, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004859 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5549 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001910 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5546 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time 
elapsed: 0.001680 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 5557 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:15:21 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (892.98, 914.78) + train/valid/test-data-iterators-setup ..........: (16.82, 143.74) +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:15:21 +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) 
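The "batch tensor" / "batch tensor after cp" pairs printed from here on show the context-parallel split: 98304 tokens per sample go in, and each context-parallel rank (apparently 8-way, matching 98304 -> 12288) keeps 98304 / 8 = 12288 of them, while the key dimension of the attention mask stays at the full 98304. The sketch below is shape-only bookkeeping for illustration, not the script's own get_batch code: it assumes a contiguous split of the sequence axis and uses meta tensors so the 98304 x 98304 mask is never actually allocated.

import torch

# Figures echoed from the log above (assumed here for illustration only).
MICRO_BATCH = 8
SEQ_FULL = 98304                     # tokens per sample before the CP split
CP_SIZE = 8                          # context-parallel size implied by 98304 -> 12288
SEQ_LOCAL = SEQ_FULL // CP_SIZE      # 98304 // 8 = 12288 tokens kept per CP rank

# "meta" tensors carry only shape and dtype, so nothing is allocated here.
full_batch = {
    "tokens": torch.empty(MICRO_BATCH, SEQ_FULL, dtype=torch.long, device="meta"),
    "labels": torch.empty(MICRO_BATCH, SEQ_FULL, dtype=torch.long, device="meta"),
    "loss_mask": torch.empty(MICRO_BATCH, SEQ_FULL, device="meta"),
    "attention_mask": torch.empty(MICRO_BATCH, 1, SEQ_FULL, SEQ_FULL, dtype=torch.bool, device="meta"),
    "position_ids": torch.empty(MICRO_BATCH, SEQ_FULL, dtype=torch.long, device="meta"),
}

def slice_for_cp_rank(batch, cp_rank, cp_size=CP_SIZE):
    """Hypothetical helper: keep this rank's contiguous share of the sequence axis."""
    sl = slice(cp_rank * SEQ_LOCAL, (cp_rank + 1) * SEQ_LOCAL)
    out = {name: t[:, sl] for name, t in batch.items() if name != "attention_mask"}
    # Only the query dimension of the mask is sharded; the key dimension stays global,
    # which is why the log shows [8, 1, 12288, 98304] rather than [8, 1, 12288, 12288].
    out["attention_mask"] = batch["attention_mask"][:, :, sl, :]
    return out

for name, t in slice_for_cp_rank(full_batch, cp_rank=0).items():
    print("batch tensor after cp:", name, tuple(t.shape))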
+batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: 
position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask 
torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304])
+batch tensor after cp: position_ids torch.Size([8, 12288])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304])
+batch tensor after cp: position_ids torch.Size([8, 12288])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304])
+batch tensor after cp: position_ids torch.Size([8, 12288])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304])
+batch tensor after cp: position_ids torch.Size([8, 12288])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304])
+batch tensor after cp: position_ids torch.Size([8, 12288])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304])
+batch tensor after cp: position_ids torch.Size([8, 12288])
+batch tensor: tokens torch.Size([8, 98304])
+batch tensor: labels torch.Size([8, 98304])
+batch tensor: loss_mask torch.Size([8, 98304])
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304])
+batch tensor: position_ids torch.Size([8, 98304])
+batch tensor after cp: tokens torch.Size([8, 12288])
+batch tensor after cp: labels torch.Size([8, 12288])
+batch tensor after cp: loss_mask torch.Size([8, 12288])
+batch tensor
after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) +batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +batch tensor: tokens torch.Size([8, 98304]) +batch tensor: labels torch.Size([8, 98304]) +batch tensor: loss_mask torch.Size([8, 98304]) 
+batch tensor: attention_mask torch.Size([8, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([8, 98304]) +batch tensor after cp: tokens torch.Size([8, 12288]) +batch tensor after cp: labels torch.Size([8, 12288]) +batch tensor after cp: loss_mask torch.Size([8, 12288]) +batch tensor after cp: attention_mask torch.Size([8, 1, 12288, 98304]) +batch tensor after cp: position_ids torch.Size([8, 12288]) +Start exporting trace 0 +Done exporting trace 0 +Number of parameters in transformer block in billions: 0.35 +Number of parameters in embedding layers in billions: 0.21 +Total number of parameters in billions: 0.56 +Number of parameters in most loaded shard in billions: 0.1400 +Theoretical memory footprints: weight and optimizer=2403.18 MB +[Rank 9] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109816.0 | max reserved: 109816.0 +[Rank 13] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109892.0 | max reserved: 109892.0 +[Rank 10] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109816.0 | max reserved: 109816.0 +[Rank 12] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109220.0 | max reserved: 109220.0 +[Rank 2] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109604.0 | max reserved: 109604.0 + [2025-06-21 22:15:39] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 17908.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[Rank 17] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110008.0 | max reserved: 110008.0 +[Rank 14] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109892.0 | max reserved: 109892.0 +[Rank 1] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109604.0 | max reserved: 109604.0 +[Rank 30] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110260.0 | max reserved: 110260.0 +[Rank 29] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110260.0 | max reserved: 110260.0 +[Rank 23] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109316.0 | max reserved: 109316.0 +[Rank 15] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109316.0 | max reserved: 109316.0 +[Rank 4] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109700.0 | max reserved: 109700.0 +[Rank 0] (after 1 iterations) memory (MB) | allocated: 85297.28564453125 | max allocated: 105266.50048828125 | reserved: 108836.0 | max reserved: 108836.0 +[Rank 25] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109244.0 | max reserved: 109244.0 +[Rank 18] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110008.0 | max reserved: 110008.0 +[Rank 8] (after 1 iterations) memory (MB) 
| allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109048.0 | max reserved: 109048.0
+[Rank 11] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109144.0 | max reserved: 109144.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109700.0 | max reserved: 109700.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 108932.0 | max reserved: 108932.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109124.0 | max reserved: 109124.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109700.0 | max reserved: 109700.0
+[Rank 26] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110224.0 | max reserved: 110224.0
+[Rank 22] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110084.0 | max reserved: 110084.0
+[Rank 24] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109244.0 | max reserved: 109244.0
+[Rank 21] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110084.0 | max reserved: 110084.0
+[Rank 27] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109244.0 | max reserved: 109244.0
+[Rank 28] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109760.0 | max reserved: 109760.0
+[Rank 20] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110084.0 | max reserved: 110084.0
+[Rank 31] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109760.0 | max reserved: 109760.0
+[Rank 19] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 109240.0 | max reserved: 109240.0
+[Rank 16] (after 1 iterations) memory (MB) | allocated: 85296.28564453125 | max allocated: 105266.50048828125 | reserved: 110008.0 | max reserved: 110008.0
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 53.31 GiB is free. Including non-PyTorch memory, this process has 86.48 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
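The 72.00 GiB that each rank fails to allocate below matches the full boolean attention mask printed above, shape [8, 1, 98304, 98304], built in setup_batches before the context-parallel split (the traceback below ends in attention_mask = torch.ones(...)). A quick back-of-the-envelope check, assuming one byte per mask element, which is what the reported size implies (the dtype itself is not shown in the log):

GIB = 1024 ** 3

def mask_gib(micro_batch, seq_q, seq_k, bytes_per_elem=1):
    # Size of a [micro_batch, 1, seq_q, seq_k] mask at 1 byte per element (bool).
    return micro_batch * 1 * seq_q * seq_k * bytes_per_elem / GIB

print(f"full mask        [8, 1, 98304, 98304]: {mask_gib(8, 98304, 98304):.2f} GiB")  # 72.00 GiB
print(f"per-CP-rank mask [8, 1, 12288, 98304]: {mask_gib(8, 12288, 98304):.2f} GiB")  #  9.00 GiB

With only about 53 GiB reported free per GPU, the full-size allocation cannot succeed at this context length, which is exactly what every rank reports below.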
+['Traceback (most recent call last):\n', '  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n                                                                            ^^^^^^^^^^^^^^^^^^^^^^^^\n', '  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n    batch = next(global_batches)\n            ^^^^^^^^^^^^^^^^^^^^\n', '  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n    attention_mask = torch.ones(\n                     ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 53.31 GiB is free. Including non-PyTorch memory, this process has 86.48 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+[The remaining failing ranks emitted the same WARNING:megatron.core.utils message followed by the same traceback (shown once above), with their output interleaved in the original log. Each tried to allocate 72.00 GiB on a GPU with a total capacity of 139.81 GiB and 82.59 GiB already allocated by PyTorch; only the per-GPU figures differ, in order of appearance:]
+ GPU | free (GiB) | in use (GiB) | reserved but unallocated (MiB)
+  5  |   53.33    |    86.47     |   151.98
+  7  |   53.37    |    86.43     |   211.98
+  6  |   53.33    |    86.47     |   151.98
+  3  |   53.42    |    86.38     |   151.98
+  2  |   53.34    |    86.46     |   231.98
+  3  |   53.33    |    86.47     |   151.98
+  1  |   53.32    |    86.48     |   151.98
+  2  |   53.32    |    86.48     |   151.98
+  5  |   53.42    |    86.38     |   151.98
+  2  |   53.33    |    86.47     |   151.98
+  0  |   53.31    |    86.48     |   151.98
+  0  |   52.58    |    87.21     |  1015.98
+  4  |   53.35    |    86.44     |   211.98
+  7  |   52.49    |    87.31     |  1015.98
+  6  |   53.34    |    86.46     |   231.98
+  7  |   53.32    |    86.48     |   151.98
+  2  |   53.43    |    86.37     |   151.98
+  1  |   53.28    |    86.52     |   307.98
+  4  |   53.33    |    86.47     |   151.98
+  1  |   53.33    |    86.47     |   151.98
+  7  |   53.42    |    86.38     |   151.98
+  0  |   53.26    |    86.54     |   307.98
+  3  |   53.32    |    86.48     |   151.98
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB.
GPU 5 has a total capacity of 139.81 GiB of which 53.32 GiB is free. Including non-PyTorch memory, this process has 86.48 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 53.32 GiB is free. Including non-PyTorch memory, this process has 86.48 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 53.42 GiB is free. Including non-PyTorch memory, this process has 86.38 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 53.42 GiB is free. Including non-PyTorch memory, this process has 86.38 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 53.33 GiB is free. Including non-PyTorch memory, this process has 86.47 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 53.33 GiB is free. Including non-PyTorch memory, this process has 86.47 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 53.43 GiB is free. Including non-PyTorch memory, this process has 86.37 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 53.43 GiB is free. Including non-PyTorch memory, this process has 86.37 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. 
Tried to allocate 72.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 53.32 GiB is free. Including non-PyTorch memory, this process has 86.48 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 53.32 GiB is free. Including non-PyTorch memory, this process has 86.48 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 53.28 GiB is free. Including non-PyTorch memory, this process has 86.52 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 307.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 53.28 GiB is free. Including non-PyTorch memory, this process has 86.52 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 307.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. 
Tried to allocate 72.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 53.35 GiB is free. Including non-PyTorch memory, this process has 86.45 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 231.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 53.35 GiB is free. Including non-PyTorch memory, this process has 86.45 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 231.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 53.43 GiB is free. Including non-PyTorch memory, this process has 86.37 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 72.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 53.43 GiB is free. Including non-PyTorch memory, this process has 86.37 GiB memory in use. Of the allocated memory 82.59 GiB is allocated by PyTorch, and 151.98 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
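Note on the 72.00 GiB allocation above: a dense attention mask of shape (batch, 1, seq, seq) grows quadratically with sequence length, so get_batch runs out of memory long before the model weights do. The exact shape and dtype built by torch.ones at pretrain_gpt_profile.py line 226 are not visible in this log, so the numbers below are only an illustrative sketch, assuming one mask element per (query, key) pair:

# Illustrative sketch (assumed shapes, not the script's actual code):
# memory of a dense (batch, 1, seq, seq) attention mask.
def mask_gib(batch: int, seq: int, bytes_per_el: int = 1) -> float:
    # bytes_per_el=1 assumes a bool mask; use 2 for fp16 or 4 for fp32
    return batch * seq * seq * bytes_per_el / 2**30

print(mask_gib(8, 16384))   # 2.0 GiB at the seq_length=16384 configured below
print(mask_gib(8, 98304))   # 72.0 GiB, one shape/dtype combination that matches the failed allocation

Setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True, as the error message suggests, only mitigates fragmentation; it cannot make a quadratically growing mask fit.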
+Running ctx_length=16384, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 16384
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... 
None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. 
None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 16384 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 
1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . 
False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 16384 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... 
None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ 
None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 
0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 16384 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 
10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+INFO:megatron.training.initialize:Setting logging level to 0
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.045 seconds
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.500 seconds
+time to initialize megatron (seconds): 8.919
+[after megatron is initialized] datetime: 2025-06-21 22:16:21
+building GPT model ...
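The parallel layout reported above follows directly from the launch configuration: 32 processes with tensor-model-parallel size 4, context-parallel size 8 and pipeline-model-parallel size 1 leave a data-parallel size of 1. A minimal sketch of that bookkeeping (not Megatron's actual initialization code):

# Assumed arithmetic only; mirrors the sizes printed in this log.
world_size = 32
tp, cp, pp = 4, 8, 1                      # tensor-, context-, pipeline-model-parallel sizes
assert world_size % (tp * cp * pp) == 0   # otherwise the launch configuration is inconsistent
dp = world_size // (tp * cp * pp)
print(dp)                                 # 1, matching "data-parallel size: 1" above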
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368
+[All 32 ranks printed the same ">>> embedding / >>> decoder / >>> output_layer" banner and reported 207162368 parameters for their (tensor, pipeline) model parallel rank.]
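The per-rank total of 207162368 parameters is consistent with the arguments above (2 layers, hidden size 4096, ffn_hidden_size 16384, 64 attention heads in 16 query groups with kv_channels 64, padded vocab 50688, 16384 learned position embeddings, output weights tied to the embedding) once the weights are split across tensor-model-parallel size 4. The recount below is only a sketch; which tensors are sharded versus replicated per rank is an assumption about Megatron's layout, not something shown in this log:

# Rough per-tensor-parallel-rank parameter recount (assumed sharding layout).
hidden, ffn, layers, tp = 4096, 16384, 2, 4
vocab_padded, max_pos = 50688, 16384
heads, groups, kv_ch = 64, 16, 64                 # grouped-query attention
qkv_out = (heads + 2 * groups) * kv_ch            # fused QKV output dim = 6144
per_layer = (
    qkv_out * hidden // tp + qkv_out // tp        # column-parallel QKV weight + bias
    + hidden * hidden // tp + hidden              # row-parallel proj weight + replicated bias
    + ffn * hidden // tp + ffn // tp              # fc1 weight + bias
    + hidden * ffn // tp + hidden                 # fc2 weight + replicated bias
    + 2 * 2 * hidden                              # two fused LayerNorms (weight + bias each)
)
embeddings = vocab_padded * hidden // tp + max_pos * hidden   # sharded word + replicated position
total = layers * per_layer + embeddings + 2 * hidden          # plus final LayerNorm
print(total)                                      # 207162368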
+>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 207162368 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 207162368 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (207162368 elements, 207162368 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, 
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+(TP, PP, encoder TP, encoder PP) mismatch after resume ((4, 1, 0, 0) vs (1, 1, 0, 0) from checkpoint): RNG state will be ignored
+(TP, PP, encoder TP, encoder PP) mismatch after resume ((4, 1, 0, 0) vs (1, 1, 0, 0) from checkpoint): Rerun state will be ignored
+ loading distributed checkpoint from gpt-checkpoint at iteration 10
+Running ctx_length=24576, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 24576
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 
0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. 
None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 24576 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... 
torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 +INFO:megatron.training.initialize:Setting logging level to 0 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 +INFO:megatron.training.initialize:Setting logging level to 0 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 
120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 +INFO:megatron.training.initialize:Setting logging level to 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... 
random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 24576 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 +INFO:megatron.training.initialize:Setting logging level to 0 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... 
None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None +INFO:megatron.training.initialize:Setting logging level to 0 + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False +INFO:megatron.training.initialize:Setting logging level to 0 + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None +INFO:megatron.training.initialize:Setting logging level to 0 + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 +INFO:megatron.training.initialize:Setting logging level to 0 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 
64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 24576 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 
1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... 
None
+ wandb_exp_name .................................. 
+ wandb_project ................................... 
+ wandb_save_dir .................................. 
+ weight_decay .................................... 0.1
+ weight_decay_incr_style ......................... constant
+ wgrad_deferral_limit ............................ 0
+ world_size ...................................... 32
+ yaml_cfg ........................................ None
+-------------------- end of arguments ---------------------
+INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
+> building GPT2BPETokenizer tokenizer ...
+INFO:megatron.training.initialize:Setting logging level to 0
+ > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.048 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 3.002 seconds
+time to initialize megatron (seconds): 9.430
+[after megatron is initialized] datetime: 2025-06-21 22:17:01
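The padded-vocab line above (50257 -> 50688, with 431 dummy tokens) follows from rounding the raw GPT-2 BPE vocabulary up to a multiple of make_vocab_size_divisible_by * tensor_model_parallel_size (128 * 4 = 512 here). A small sketch of that rule, assuming the standard Megatron padding behavior; the function name is mine:

# Hedged sketch: reproduce the "padded vocab" log line from the run's arguments.
def padded_vocab_size(orig_vocab_size: int,
                      make_vocab_size_divisible_by: int = 128,
                      tensor_model_parallel_size: int = 4) -> int:
    # Round up to the nearest multiple of (divisible_by * TP size).
    multiple = make_vocab_size_divisible_by * tensor_model_parallel_size
    return ((orig_vocab_size + multiple - 1) // multiple) * multiple

padded = padded_vocab_size(50257)      # GPT-2 BPE vocab size from the log
print(padded, padded - 50257)          # -> 50688 431, matching the log line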
+building GPT model ...
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 240716800
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 240716800
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 240716800
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 240716800
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (240716800 elements, 240716800 padded size):
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.embedding.position_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
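The OptimizerConfig above enables fp16 training with dynamic loss scaling (initial_loss_scale=2**32, loss_scale_window=1000, hysteresis=2, min_loss_scale=1.0), which is also why check_for_nan_in_loss_and_grad was forced off earlier in the log. A simplified sketch of the grow/backoff scheme those fields configure; this is illustrative and not the exact Megatron grad-scaler class:

# Hedged sketch of dynamic fp16 loss scaling with the values logged above.
class DynamicLossScaler:
    def __init__(self, initial_scale=2.0 ** 32, min_scale=1.0,
                 growth_interval=1000, hysteresis=2,
                 growth_factor=2.0, backoff_factor=0.5):
        self.scale = initial_scale               # initial_loss_scale = 4294967296
        self.min_scale = min_scale               # min_loss_scale = 1.0
        self.growth_interval = growth_interval   # loss_scale_window = 1000
        self.hysteresis = hysteresis             # hysteresis = 2
        self.growth_factor = growth_factor
        self.backoff_factor = backoff_factor
        self._growth_tracker = 0
        self._hysteresis_tracker = hysteresis

    def update(self, found_inf: bool) -> None:
        if found_inf:
            # Overflow in the fp16 grads: reset the growth window and, after
            # `hysteresis` consecutive overflows, back the scale off (never
            # below min_scale). The step itself is skipped by the optimizer.
            self._growth_tracker = 0
            self._hysteresis_tracker -= 1
            if self._hysteresis_tracker <= 0:
                self.scale = max(self.scale * self.backoff_factor, self.min_scale)
        else:
            # Clean step: after `growth_interval` of them in a row, grow the
            # scale again and re-arm the hysteresis counter.
            self._growth_tracker += 1
            if self._growth_tracker >= self.growth_interval:
                self._growth_tracker = 0
                self._hysteresis_tracker = self.hysteresis
                self.scale *= self.growth_factor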
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 240716800 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 240716800 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 240716800 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 240716800 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.83, 3.52) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:17:02 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=24576, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005423 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2774 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001701 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2773 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time 
elapsed: 0.001453 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2778 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:17:03 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1489.24, 1525.38) + train/valid/test-data-iterators-setup ..........: (17.12, 147.45) +training ... +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:17:03 +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.41 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.41 GiB is free. Including non-PyTorch memory, this process has 3.40 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.41 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 288.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.37 GiB is free. Including non-PyTorch memory, this process has 3.43 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 288.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.39 GiB is free. Including non-PyTorch memory, this process has 3.41 GiB memory in use. Of the allocated memory 1.82 GiB is allocated by PyTorch, and 77.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
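The failure above is a single dense attention-mask allocation in setup_batches() (pretrain_gpt_profile.py, line 226): the mask is materialized as one contiguous tensor, presumably of shape (batch, 1, seq, seq), so its size grows quadratically with context length, and a single 288.00 GiB request can never fit on a 139.81 GiB device. The PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint printed by PyTorch only mitigates fragmentation of allocations that would otherwise fit. The Python sketch below is illustrative only and not part of the log; the batch size of 8 and the bool dtype are assumptions about what the profiling script allocates.

import torch

def dense_mask_gib(seq_len: int, batch: int = 8, dtype: torch.dtype = torch.bool) -> float:
    # Size in GiB of a dense (batch, 1, seq, seq) attention mask (assumed shape).
    element_size = torch.tensor([], dtype=dtype).element_size()
    return batch * seq_len * seq_len * element_size / 2**30

# Quadratic growth: negligible at 1k context, 8 GiB at 32k, 128 GiB at 131k.
for seq in (1024, 32768, 131072):
    print(f"seq_len={seq:>6}: {dense_mask_gib(seq):8.2f} GiB")

# A meta-device tensor shows the shape and footprint without touching GPU memory.
mask = torch.ones(8, 1, 32768, 32768, dtype=torch.bool, device="meta")
print(mask.shape, mask.element_size() * mask.numel() / 2**30, "GiB")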
+Running ctx_length=32768, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8
+Cleaning up checkpoint directory: gpt-checkpoint
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 32768
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 32768
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 32768
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 32768
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting
logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... 
None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 
0 + encoder_seq_length .............................. 32768 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 
256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... 
torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 32768 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False +INFO:megatron.training.initialize:Setting logging level to 0 + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 
0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... 
[0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 32768 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... 
None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ 
False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.060 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. 
Compilation time: 2.616 seconds +time to initialize megatron (seconds): 8.636 +[after megatron is initialized] datetime: 2025-06-21 22:17:41 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank 
(3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 274271232 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (274271232 elements, 274271232 padded size): + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.embedding.word_embeddings.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, 
main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (4.99, 5.41) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:17:43 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=32768, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004450 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2081 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001684 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2080 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False 
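The per-rank parameter count reported above (274,271,232 on every (tensor, pipeline) rank) follows directly from the logged arguments. A minimal sketch of that arithmetic, assuming the standard Megatron column/row-parallel splits implied by the gradient-bucket parameter list above; the figures are illustrative only:

tp = 4                      # tensor_model_parallel_size
hidden = 4096               # hidden_size
ffn = 16384                 # ffn_hidden_size
layers = 2                  # num_layers
heads = 64                  # num_attention_heads
groups = 16                 # num_query_groups (GQA)
kv_channels = 64
padded_vocab = 50688        # 50257 padded to a multiple of make_vocab_size_divisible_by * tp = 512
max_pos = 32768             # max_position_embeddings (learned_absolute, replicated on each rank)

word_emb = padded_vocab // tp * hidden      # column-split word embeddings
pos_emb = max_pos * hidden                  # position embeddings, full copy per rank
qkv_out = (heads // tp) * kv_channels + 2 * (groups // tp) * kv_channels
qkv = hidden * qkv_out + qkv_out            # column-parallel QKV weight + bias
proj = (hidden // tp) * hidden + hidden     # row-parallel attention projection
fc1 = hidden * (ffn // tp) + ffn // tp      # column-parallel MLP up-projection
fc2 = (ffn // tp) * hidden + hidden         # row-parallel MLP down-projection
norms = 2 * 2 * hidden                      # pre-attention + pre-MLP LayerNorm (weight + bias)
per_layer = qkv + proj + fc1 + fc2 + norms
final_norm = 2 * hidden
# The output layer is tied to the word embeddings (untie_embeddings_and_output_weights=False),
# so it contributes no additional parameters.
print(word_emb + pos_emb + layers * per_layer + final_norm)   # 274271232, matching the log

The out-of-memory failures that follow are raised while materializing a dense attention mask (the torch.ones call in setup_batches of pretrain_gpt_profile.py). The mask's exact shape and dtype are not visible in this log, so the helper below is only a rough sketch of why such a mask is untenable at seq_length=32768: its size grows with the square of the sequence length.

def dense_mask_gib(batch, seq_len, bytes_per_element):
    # Memory for an explicit [batch, 1, seq_len, seq_len] attention mask, in GiB.
    return batch * seq_len * seq_len * bytes_per_element / 2**30

print(dense_mask_gib(1, 32768, 1))   # 1.0  -> a single boolean mask is already 1 GiB
print(dense_mask_gib(8, 32768, 4))   # 32.0 -> eight float32 masks
# The failed 512.00 GiB allocation corresponds to 512 bytes per (seq, seq) entry at
# seq_len=32768, i.e. a larger batch/dtype combination inside the profiling script.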
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001424 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2083 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:17:43 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (1929.30, 1961.39) + train/valid/test-data-iterators-setup ..........: (15.56, 135.07) +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:17:43 +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] + reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] + reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] + reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB isWARNING:megatron.core.utils:CUDA out of memory. 
Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.00 GiB is free. Including non-PyTorch memory, this process has 3.81 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB isWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) + reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 136.03 GiB is free. Including non-PyTorch memory, this process has 3.77 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 136.01 GiB is free. Including non-PyTorch memory, this process has 3.79 GiB memory in use. Of the allocated memory 2.08 GiB is allocated by PyTorch, and 195.60 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
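+For scale: if the mask built in setup_batches is a dense boolean tensor of shape [batch, 1, seq, seq] (an assumption here -- the torch.ones call in pretrain_gpt_profile.py is truncated in the traceback above), it needs batch * seq * seq bytes and grows quadratically with context length. The hypothetical sketch below reproduces the 512.00 GiB allocation reported on every GPU above and shows what the next run's 40960-token context would need under the same assumption.
+
+    # Hypothetical sketch, not part of pretrain_gpt_profile.py: estimate the memory a
+    # dense [batch, 1, seq, seq] boolean attention mask needs (torch.bool is 1 byte/element).
+    def dense_mask_gib(batch_size: int, seq_len: int) -> float:
+        return batch_size * seq_len * seq_len / 1024**3
+
+    # 8 * 262144**2 bytes = 512.00 GiB, consistent with the failed allocation above if the
+    # failing run used batch 8 at a 262144-token context (an inference, not logged here).
+    print(dense_mask_gib(8, 262144))                       # 512.0
+    # ctx_length=40960: ~12.5 GiB at batch 8, ~1.56 GiB at micro_batch_size 1.
+    print(dense_mask_gib(8, 40960), dense_mask_gib(1, 40960))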
+Running ctx_length=40960, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 40960
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+  account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. 
False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. 
False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 40960 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ 
False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 
1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 40960 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... 
True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 
2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... 
False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 40960 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. 
True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... 
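The "> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line above comes from Megatron padding the tokenizer vocabulary so the word-embedding table splits evenly and stays aligned across the 4 tensor-parallel ranks. A minimal arithmetic sketch, assuming the default make-vocab-size-divisible-by value of 128 (that flag is not visible in this excerpt of the argument dump):

    # Hypothetical illustration of Megatron-style vocab padding; 50257, 50688, 431 and the
    # TP size of 4 are taken from the log, the divisor of 128 is an assumed default.
    def padded_vocab_size(orig_vocab: int, divisible_by: int, tp_size: int) -> int:
        multiple = divisible_by * tp_size              # 128 * 4 = 512
        return ((orig_vocab + multiple - 1) // multiple) * multiple

    assert padded_vocab_size(50257, 128, 4) == 50688   # 50688 - 50257 = 431 dummy tokens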
+INFO:megatron.training.initialize:Setting logging level to 0
[the identical "Setting logging level to 0" INFO line is printed once per rank; the remaining repetitions in this segment are omitted]
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+> initialized tensor model parallel with size 4
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.048 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.579 seconds
+time to initialize megatron (seconds): 8.770
+[after megatron is initialized] datetime: 2025-06-21 22:18:22
+building GPT model ...
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 307825664
[the same ">>> embedding / >>> decoder / >>> output_layer" banner and the same 307825664-parameter count are reported by the other (tensor, pipeline) model parallel ranks (0, 0) through (3, 0) across the job; their interleaved, partly garbled repetitions are omitted]
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (307825664 elements, 307825664 padded size):
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.embedding.position_embeddings.weight
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9,
use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 307825664 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.51, 4.83) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:18:24 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... +INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=40960, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.004589 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1664 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001708 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1664 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001456 seconds 
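The OptimizerConfig logged above runs Adam in fp16 with dynamic loss scaling: initial_loss_scale=4294967296 (2**32), loss_scale_window=1000, hysteresis=2, min_loss_scale=1.0. As a rough conceptual sketch of what those knobs usually control (assumed semantics for illustration, not Megatron's grad-scaler code):

    # Conceptual dynamic loss scaling with the logged hyperparameters (assumed semantics):
    # back off after `hysteresis` consecutive overflowing steps, grow again after
    # `loss_scale_window` consecutive clean steps.
    class DynamicLossScale:
        def __init__(self, initial=2.0**32, window=1000, hysteresis=2, minimum=1.0):
            self.scale = initial
            self.window, self.hysteresis, self.minimum = window, hysteresis, minimum
            self._bad_steps = 0
            self._good_steps = 0

        def update(self, found_inf: bool) -> None:
            if found_inf:                     # fp16 gradients overflowed at the current scale
                self._good_steps = 0
                self._bad_steps += 1
                if self._bad_steps >= self.hysteresis:
                    self.scale = max(self.scale / 2.0, self.minimum)
                    self._bad_steps = 0
            else:                             # clean step
                self._bad_steps = 0
                self._good_steps += 1
                if self._good_steps >= self.window:
                    self.scale *= 2.0         # 1000 clean steps in a row -> double the scale
                    self._good_steps = 0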
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1667 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:18:24 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (2539.47, 2574.16) + train/valid/test-data-iterators-setup ..........: (21.06, 148.47) +training ... +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:18:24 +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
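The traceback above shows the OOM is raised while materializing a dense attention mask with torch.ones inside setup_batches. A dense square mask grows quadratically with sequence length, and the reported 800.00 GiB is consistent with a mask built at several multiples of the logged 40960-token seq_length. A back-of-the-envelope sketch (the shape and dtype below are assumptions chosen to reproduce the logged figure, not read from pretrain_gpt_profile.py):

    # One shape/dtype that reproduces the logged 800.00 GiB exactly: a torch.bool mask
    # of shape (8, 1, 327680, 327680), where 327680 = 8 * 40960. The factor of 8 on the
    # batch and sequence dimensions is an assumption; only 40960 and 800 GiB are from the log.
    GiB = 2**30
    print(8 * 1 * 327680 * 327680 * 1 / GiB)   # 800.0  (1 byte per bool element)

    # For comparison, the same dense mask at the logged seq_length alone:
    print(8 * 1 * 40960 * 40960 * 1 / GiB)     # 12.5

Either way, a dense (S, S) mask at this scale cannot fit on a 139.81 GiB device. The GPTDatasetConfig above was built with create_attention_mask=True, so one way around this particular allocation is to not materialize an explicit mask at all, for example by using an attention kernel with an implicit causal mask (use_flash_attn is False in this run).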
[the same torch.OutOfMemoryError ("Tried to allocate 800.00 GiB", raised by the torch.ones call in setup_batches, pretrain_gpt_profile.py line 226) is reported for each of GPUs 0-7 on the participating nodes; the per-rank WARNING:megatron.core.utils messages and Python tracebacks differ only in the GPU index and in a few hundredths of a GiB of reported free/used memory, and their output is interleaved mid-line in the raw log; most of the duplicated output is omitted here, and the log picks up again with the last few occurrences below]
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.88 GiB is free.
Including non-PyTorch memory, this process has 3.93 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 135.88 GiB is free. Including non-PyTorch memory, this process has 3.93 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.88 GiB is free. Including non-PyTorch memory, this process has 3.93 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.88 GiB is free. Including non-PyTorch memory, this process has 3.93 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 135.88 GiB is free. Including non-PyTorch memory, this process has 3.93 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 135.88 GiB is free. Including non-PyTorch memory, this process has 3.93 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 135.84 GiB is free. Including non-PyTorch memory, this process has 3.96 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 135.84 GiB is free. Including non-PyTorch memory, this process has 3.96 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 135.86 GiB is free. 
Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.94 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.94 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 800.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 800.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 135.86 GiB is free. Including non-PyTorch memory, this process has 3.95 GiB memory in use. Of the allocated memory 2.34 GiB is allocated by PyTorch, and 88.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +Running ctx_length=49152, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 49152 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Cleaning up checkpoint directory: gpt-checkpoint +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 49152 +TP_SIZE: 4 +CP_SIZE: 8 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 49152 +TP_SIZE: 4 +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +CHECKPOINT_PATH: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 49152 +TP_SIZE: 4 +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +CP_SIZE: 8 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. 
please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 32, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 4, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. 
False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. 
None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 49152 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 4 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 
224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 32 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. 
None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 49152 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... 
probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ 
None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 49152 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ 
None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 4 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ 
True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 32 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 431 dummy tokens (new size: 50688) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 4 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. 
Compilation time: 0.044 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.589 seconds +time to initialize megatron (seconds): 9.336 +[after megatron is initialized] datetime: 2025-06-21 22:19:03 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 
341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 341380096 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 341380096 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (341380096 elements, 341380096 padded size): + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.embedding.position_embeddings.weight + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.final_layernorm.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight +INFO:megatron.core.optimizer:Setting up 
optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (24.44, 25.09) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:19:06 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
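As a side note on the dataset bookkeeping above: with blend and blend_per_split unset, the mock dataset falls back to split '1,1,1', and the logged split_matrix is simply the cumulative normalized weights. A minimal sketch of that arithmetic (an illustrative recomputation, not Megatron's own code):

    # Recompute the split_matrix logged above from the '1,1,1' split string.
    weights = [float(w) for w in "1,1,1".split(",")]
    total = sum(weights)
    bounds = [sum(weights[:i]) / total for i in range(len(weights) + 1)]
    split_matrix = list(zip(bounds[:-1], bounds[1:]))
    print(split_matrix)
    # [(0.0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]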
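Similarly, the "> padded vocab (size: 50257) with 431 dummy tokens (new size: 50688)" line earlier in this run follows from rounding the GPT-2 vocabulary up to a multiple of make_vocab_size_divisible_by (128) times the tensor-model-parallel size (4), so that every tensor-parallel shard holds an equal, aligned slice of the embedding table. A minimal sketch of the arithmetic; the helper name below is ours, not Megatron's:

    import math

    def padded_vocab_size(orig_vocab_size: int, divisible_by: int, tp_size: int) -> int:
        # Round the vocabulary up so each tensor-parallel rank gets an equal,
        # aligned slice of the embedding table (illustrative helper).
        multiple = divisible_by * tp_size
        return int(math.ceil(orig_vocab_size / multiple) * multiple)

    new_size = padded_vocab_size(50257, divisible_by=128, tp_size=4)
    print(new_size, new_size - 50257)  # -> 50688 431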
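The torch.OutOfMemoryError that ends this run below, like the 800.00 GiB failures logged for the previous context length above, originates in setup_batches building a dense attention mask with torch.ones; such a mask grows with the square of the sequence length, so at seq_length=49152 the single allocation dwarfs the 139.81 GiB of one GPU, and PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True cannot help because it only mitigates fragmentation of memory that actually exists. The exact mask shape and dtype used by pretrain_gpt_profile.py are not visible in this log, so the batch size and bytes-per-element in the sketch below are assumptions to vary:

    # Back-of-the-envelope estimate of dense attention-mask memory; the
    # (batch, 1, seq, seq) shape and the dtypes assumed here are not taken
    # from pretrain_gpt_profile.py.
    GiB = 1024 ** 3

    def mask_gib(seq_len: int, batch_size: int, bytes_per_element: int) -> float:
        return batch_size * seq_len * seq_len * bytes_per_element / GiB

    for seq_len in (1024, 49152):
        print(seq_len, round(mask_gib(seq_len, batch_size=8, bytes_per_element=1), 2), "GiB as bool")

    # Even a bool mask for batch 8 at seq_len 49152 is ~18 GiB; the 1152.00 GiB
    # request reported below works out to 1152 GiB / 49152**2 = 512 bytes per
    # (query, key) position, so no allocator setting can fit it in 139.81 GiB.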
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=49152, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005780 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1387
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001758 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1386
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001462 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1389
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:19:06 
+done with setup ...
+training ...
+(min, max) time across ranks (ms):
+    model-and-optimizer-setup ......................: (2779.67, 2805.70)
+    train/valid/test-data-iterators-setup ..........: (17.78, 138.15)
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:19:06 
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1152.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 135.56 GiB is free. Including non-PyTorch memory, this process has 4.24 GiB memory in use. Of the allocated memory 2.60 GiB is allocated by PyTorch, and 106.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', '  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n                                                                            ^^^^^^^^^^^^^^^^^^^^^^^^\n', '  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n    batch = next(global_batches)\n            ^^^^^^^^^^^^^^^^^^^^\n', '  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n    attention_mask = torch.ones(\n                     ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1152.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 135.56 GiB is free. Including non-PyTorch memory, this process has 4.24 GiB memory in use. Of the allocated memory 2.60 GiB is allocated by PyTorch, and 106.47 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
[... the same torch.OutOfMemoryError warning and traceback (1152.00 GiB requested by torch.ones at pretrain_gpt_profile.py:226) was emitted for GPUs 0-7 on every rank; the interleaved duplicate copies are omitted ...]
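A note on the failure above: the 1152.00 GiB request comes from materializing a dense attention mask in the batch-setup path (the torch.ones call at pretrain_gpt_profile.py:226; create_attention_mask_in_dataloader and create_attention_mask are True in the configs above), and a dense mask grows quadratically with sequence length. The sketch below is a standalone back-of-the-envelope estimate, not Megatron's code; the helper name and the assumed (leading_dim, seq_len, seq_len) mask layout are illustrative only.

    GiB = 1024 ** 3

    def dense_mask_bytes(leading_dim: int, seq_len: int, dtype_bytes: int) -> int:
        # Bytes needed for a dense (leading_dim, seq_len, seq_len) attention mask.
        return leading_dim * seq_len * seq_len * dtype_bytes

    # The logged 1152.00 GiB request is arithmetically consistent with a float32
    # mask whose leading dimension works out to 128 at sequence_length=49152:
    assert dense_mask_bytes(128, 49152, 4) == 1152 * GiB

    # Quadratic growth with context length (assuming 8 samples, 1-byte bool mask):
    for seq in (1024, 49152, 65536):
        print(f"ctx_length={seq}: {dense_mask_bytes(8, seq, 1) / GiB:.2f} GiB")

Whatever the exact shape, the request is roughly eight times the 139.81 GiB a single GPU reports, so the allocator setting suggested in the warning (expandable_segments) cannot help; only avoiding the explicit quadratic mask, or reducing context length or batch size, would. The next run below raises ctx_length to 65536, which makes a dense mask larger still.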
+Running ctx_length=65536, TP_SIZE=4, CP_SIZE=8, BATCH_SIZE=8
+Cleaning up checkpoint directory: gpt-checkpoint
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 65536
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+--------------------------------
+CTX_LENGTH: 65536
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 65536
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 65536
+TP_SIZE: 4
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3