diff --git "a/attnserver.run_attnserver.slurm.sh.343194.out.log" "b/attnserver.run_attnserver.slurm.sh.343194.out.log" --- "a/attnserver.run_attnserver.slurm.sh.343194.out.log" +++ "b/attnserver.run_attnserver.slurm.sh.343194.out.log" @@ -11099,3 +11099,1976 @@ make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/mega > compiling and loading fused kernels ... INFO:megatron.training.initialize:Setting logging level to 0 >>> done with compiling and loading fused kernels. Compilation time: 2.686 seconds +time to initialize megatron (seconds): 9.707 +[after megatron is initialized] datetime: 2025-06-21 20:53:28 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding>>> embedding + +>>> decoder>>> decoder + +>>> output_layer>>> output_layer + +>>> embedding +>>> embedding>>> decoder + +>>> output_layer +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 + + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 +>>> embedding +>>> decoder + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer +>>> embedding + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 +>>> embedding +>>> decoder > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 + +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 + > number of 
parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 78706176 +>>> embedding +>>> decoder +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +>>> embedding +>>> decoder +>>> output_layer +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (78706176 elements, 78706176 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.mlp.linear_fc1.bias + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 78706176 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.weight + 
module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.embedding.position_embeddings.weight + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 78706176 + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 78706176 +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (30.40, 31.11) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 20:53:29 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
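For reference, the fp16 Adam setup dumped above can be written down as a config object directly. This is a minimal sketch that assumes the megatron.core.optimizer.OptimizerConfig dataclass accepts the printed field names as keyword arguments; only values that appear in the logged repr are passed, everything else keeps its default.

import torch
from megatron.core.optimizer import OptimizerConfig

# Values copied from the OptimizerConfig repr logged above; assumption: the
# dataclass accepts these printed field names as keyword arguments. The cosine
# learning-rate decay is handled separately by the optimizer-param scheduler.
opt_config = OptimizerConfig(
    optimizer='adam',
    lr=5e-4,
    min_lr=0.0,
    weight_decay=0.1,
    fp16=True,
    params_dtype=torch.float16,
    initial_loss_scale=4294967296,   # 2**32, starting point for dynamic loss scaling
    loss_scale_window=1000,
    hysteresis=2,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_eps=1e-8,
    clip_grad=1.0,
    use_distributed_optimizer=False,
)

Because loss_scale is left at None, loss scaling is dynamic: it starts at 2**32 and is reduced on overflow, which is consistent with the initial_loss_scale, loss_scale_window, and hysteresis values printed above.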
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=2048, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.007929 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33296 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002826 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33281 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002577 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 33343 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 20:53:29 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (600.30, 633.86) + train/valid/test-data-iterators-setup ..........: (20.89, 146.17) +training ... +Setting rerun_state_machine.current_iteration to 0... 
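The split_matrix logged during dataset construction follows directly from the '1,1,1' split string: the weights are normalized and converted into cumulative (start, end) bounds, one pair per train/valid/test split. A small standalone sketch of that bookkeeping (illustrative only, not the actual Megatron helper):

def split_to_matrix(split: str):
    """Turn a comma-separated weight string such as '1,1,1' into
    normalized cumulative (start, end) bounds, one pair per split."""
    weights = [float(w) for w in split.split(',')]
    total = sum(weights)
    # Cumulative fractions 0, 1/3, 2/3, 1 for the '1,1,1' case.
    cumulative = [sum(weights[:i]) / total for i in range(len(weights) + 1)]
    return list(zip(cumulative[:-1], cumulative[1:]))

print(split_to_matrix('1,1,1'))
# -> [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., 1.0)],
#    matching the split_matrix reported in the log above.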
+[before the start of training step] datetime: 2025-06-21 20:53:29 +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch 
tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch 
tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +batch tensor: tokens torch.Size([32, 65536]) +batch tensor: labels torch.Size([32, 65536]) +batch tensor: loss_mask torch.Size([32, 65536]) +batch tensor: attention_mask torch.Size([32, 1, 65536, 65536]) +batch tensor: position_ids torch.Size([32, 65536]) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. 
Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. 
GPU 0 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. 
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+[the same WARNING is emitted for GPUs 0-7 on every rank; free memory ranges from 9.59 to 9.64 GiB and in-use memory from 130.16 to 130.21 GiB, with 128.65 GiB allocated by PyTorch and 16.51 MiB reserved but unallocated in every case]
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+[the matching torch.OutOfMemoryError traceback is printed once per rank; every failure originates from val.index_select(seq_dim, index) in get_batch_on_this_cp_rank (megatron/core/utils.py:1765), reached via get_batch (pretrain_gpt_profile.py:293) from forward_step (pretrain_gpt_profile.py:446)]
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.60 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. 
GPU 5 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.60 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. 
Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.63 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. 
GPU 6 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memorWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. 
GPU 0 has a total capacity of 139.81 GiB of which 9.60 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 9.62 GiB is free. Including non-PyTorch memory, this process has 130.18 GiB memory in use. Of the allocated memor['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 9.64 GiB is free. Including non-PyTorch memory, this process has 130.16 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 9.60 GiB is free. 
Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +y 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.60 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 293, in get_batch\n batch = get_batch_on_this_cp_rank(batch)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/utils.py", line 1765, in get_batch_on_this_cp_rank\n val = val.index_select(seq_dim, index)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 16.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 9.60 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 16.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 9.61 GiB is free. Including non-PyTorch memory, this process has 130.20 GiB memory in use. Of the allocated memory 128.65 GiB is allocated by PyTorch, and 16.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
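The traceback above is worth a gloss: get_batch_on_this_cp_rank keeps only this context-parallel rank's shard of the sequence dimension, and the index_select call that builds the shard materializes a new tensor, which is where the 16.00 GiB request fails; the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the message only mitigates fragmentation, it does not help when the allocation simply does not fit. The sketch below illustrates the slicing pattern under simplifying assumptions (a contiguous per-rank shard, a dict-shaped batch, hypothetical cp_rank/cp_size arguments); the real Megatron helper uses a load-balanced split, so treat this as an illustration rather than the library's implementation.

import torch

def slice_batch_for_cp_rank(batch, cp_rank, cp_size, seq_dim=1):
    # Hypothetical helper: keep this context-parallel rank's contiguous
    # shard of every tensor in the batch along the sequence dimension.
    # (The actual get_batch_on_this_cp_rank load-balances chunks across
    # ranks; this is a simplified sketch of the same index_select idea.)
    out = {}
    for key, val in batch.items():
        shard = val.size(seq_dim) // cp_size
        index = torch.arange(cp_rank * shard, (cp_rank + 1) * shard,
                             device=val.device)
        # index_select allocates a brand-new tensor of the sharded size;
        # for a [1, 1, seq, seq] attention mask built in the dataloader this
        # is typically by far the largest tensor in the batch, which is
        # consistent with the 16.00 GiB request in the traceback above.
        out[key] = val.index_select(seq_dim, index)
    return out

Note that the full-length tensor is still alive while its shard is being built, so peak memory transiently holds both, which is why the failure can surface here even when the sharded batch itself would fit.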
+Running ctx_length=4096, TP_SIZE=8, CP_SIZE=8, BATCH_SIZE=32
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 4096
+TP_SIZE: 8
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
[identical copies of the cleanup message and this banner follow from the other launcher instances]
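For this launch the parallel layout has to multiply out to the 64-rank world size reported further down in the log (tensor-model-parallel 8 x context-parallel 8 x pipeline 1 x data-parallel 1 = 64). A tiny sanity check of that arithmetic, with illustrative variable names, is:

TP_SIZE, CP_SIZE, PP_SIZE, WORLD_SIZE = 8, 8, 1, 64  # values echoed by this run
dp_size, remainder = divmod(WORLD_SIZE, TP_SIZE * CP_SIZE * PP_SIZE)
assert remainder == 0, "world size must be divisible by TP * CP * PP"
print(dp_size)  # 1, matching "data-parallel size: 1" in the log below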
+INFO:megatron.training.initialize:Setting logging level to 0
[this logging-level message is printed once by every rank]
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+using world size: 64, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+  account_for_embedding_in_pipeline_split ......... False
+  account_for_loss_in_pipeline_split .............. False
+  accumulate_allreduce_grads_in_fp32 .............. False
+  adam_beta1 ...................................... 0.9
+  adam_beta2 ...................................... 0.999
+  adam_eps ........................................ 1e-08
+  add_bias_linear ................................. True
+  add_position_embedding .......................... True
+  add_qkv_bias .................................... True
+  adlr_autoresume ................................. False
+  adlr_autoresume_interval ........................ 1000
+  align_grad_reduce ............................... True
+  align_param_gather .............................. False
+  app_tag_run_name ................................ None
+  app_tag_run_version ............................. 0.0.0
+  apply_layernorm_1p .............................. False
+  apply_query_key_layer_scaling ................... False
+  apply_residual_connection_post_layernorm ........ False
+  apply_rope_fusion ............................... False
+  async_save ...................................... None
+  async_tensor_model_parallel_allreduce ........... True
+  attention_backend ............................... AttnBackend.auto
+  attention_dropout ............................... 0.1
+  attention_softmax_in_fp32 ....................... False
+  auto_detect_ckpt_format ......................... False
+  barrier_with_L1_time ............................ True
+  bert_binary_head ................................ True
+  bert_embedder_type .............................. megatron
+  bert_load ....................................... None
+  bf16 ............................................ False
+  bias_dropout_fusion ............................. True
+  bias_gelu_fusion ................................ True
+  bias_swiglu_fusion .............................. True
+  biencoder_projection_dim ........................ 0
+  biencoder_shared_query_context_model ............ False
+  block_data_path ................................. None
+  calc_ft_timeouts ................................ False
+  calculate_per_token_loss ........................
False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 8 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... 
None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 4096 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 8 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 
0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 64 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... 
None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 4096 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... 
softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... 
torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 
4096 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 8 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... 
False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 64 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 8 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.041 seconds +> compiling and loading fused kernels ... +INFO:megatron.training.initialize:Setting logging level to 0 +>>> done with compiling and loading fused kernels. Compilation time: 3.225 seconds +time to initialize megatron (seconds): 10.782 +[after megatron is initialized] datetime: 2025-06-21 20:54:09 +building GPT model ... 
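For readers checking the padded-vocab line above: the new size is simply the raw GPT2BPETokenizer vocabulary rounded up to the next multiple of make_vocab_size_divisible_by times tensor_model_parallel_size (128 x 8 = 1024 for the arguments printed above). A minimal sketch of that arithmetic, under the assumption that rounding up is all the padding does here (pad_vocab_size is an illustrative helper, not Megatron's own API):

    # Illustrative re-derivation of "> padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)".
    # Only the numbers come from the log/arguments above; the helper itself is hypothetical.
    def pad_vocab_size(orig_vocab_size: int,
                       make_vocab_size_divisible_by: int = 128,
                       tensor_model_parallel_size: int = 8) -> int:
        multiple = make_vocab_size_divisible_by * tensor_model_parallel_size  # 1024
        return ((orig_vocab_size + multiple - 1) // multiple) * multiple       # round up

    padded = pad_vocab_size(50257)
    print(padded, padded - 50257)  # 51200 943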
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 87094784
+ > number of parameters on (tensor, pipeline) model parallel
rank (7, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 87094784 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 87094784 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 87094784 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 87094784 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 87094784 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 87094784 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (87094784 elements, 87094784 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias 
+ module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (4.10, 4.41) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 20:54:10 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
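As a quick cross-check of the split_matrix line above: with mock data the '1,1,1' split is normalized into cumulative train/valid/test boundaries. A small standalone sketch of that normalization (not Megatron's own implementation, which may handle rounding and the final boundary differently):

    # Standalone sketch: turn a "1,1,1" split string into cumulative (start, end) fractions.
    def split_to_matrix(split: str):
        weights = [float(w) for w in split.split(",")]
        total = sum(weights)
        bounds, acc = [], 0.0
        for w in weights:
            start = acc
            acc += w / total
            bounds.append((start, acc))
        return bounds

    print(split_to_matrix("1,1,1"))
    # roughly [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., ~1.0)], matching the split_matrix above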
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=4096, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005407 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16648 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002294 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16640 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.002128 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 16671 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 20:54:10 +done with setup ... +training ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (647.99, 682.50) + train/valid/test-data-iterators-setup ..........: (17.00, 137.57) +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 20:54:10 +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB.
GPU 3 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. 
GPU 1 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. 
Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. 
Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. 
Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.55 GiB is free. Including non-PyTorch memory, this process has 2.25 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB iWARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.51 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. 
If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. 
Tried to allocate 512.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 137.50 GiB is free. Including non-PyTorch memory, this process has 2.31 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +s reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 137.53 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.54 GiB is free. Including non-PyTorch memory, this process has 2.27 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB i['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. 
GPU 3 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+[... the same megatron.core.utils warning is emitted by every failing rank, differing only in the GPU index, the reported free memory (137.50-137.55 GiB), and the process memory in use (2.25-2.31 GiB); the remaining interleaved copies are elided ...]
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 512.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 137.52 GiB is free. Including non-PyTorch memory, this process has 2.29 GiB memory in use. Of the allocated memory 744.49 MiB is allocated by PyTorch, and 27.51 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+[... the identical captured traceback (forward_step -> get_batch -> setup_batches -> torch.ones) is re-printed for each failing rank; the duplicate, mutually truncated copies are elided ...]
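Editorial note: the 512.00 GiB figure in these messages is consistent with materializing a full dense attention mask on one GPU. A minimal arithmetic sketch follows, assuming the failing configuration used BATCH_SIZE=32 with a context length of 131072 (the failing run's ctx_length is not shown in this excerpt, so that value is an inference from the reported allocation size): a [batch, 1, seq, seq] boolean mask stores one byte per element, and 32 * 131072 * 131072 bytes is exactly 512 GiB, far beyond the 139.81 GiB capacity reported per GPU. The PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint quoted in the message targets fragmentation of reserved-but-unallocated memory; it cannot help when a single allocation exceeds total device memory.

# Sketch only: reproduces the 512.00 GiB allocation size reported above.
# Assumed (not shown in this excerpt): batch_size=32, seq_len=131072, and a
# dense [batch, 1, seq, seq] mask stored as torch.bool (1 byte per element).
batch_size, seq_len = 32, 131072
mask_bytes = batch_size * 1 * seq_len * seq_len
print(f"dense mask: {mask_bytes / 2**30:.2f} GiB")  # -> dense mask: 512.00 GiB
# The allocation that fails in setup_batches() would resemble:
# attention_mask = torch.ones(batch_size, 1, seq_len, seq_len,
#                             dtype=torch.bool, device="cuda")
# which raises torch.OutOfMemoryError on a 139.81 GiB device, as seen above.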
+Running ctx_length=8192, TP_SIZE=8, CP_SIZE=8, BATCH_SIZE=32
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 8192
+TP_SIZE: 8
+CP_SIZE: 8
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+[... the same cleanup banner and interpreter path are printed by each launcher process (eight copies in total); the remaining interleaved copies are elided ...]
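Editorial note: applying the same dense-mask arithmetic to the run launched above (only CTX_LENGTH=8192 and BATCH_SIZE=32 are taken from the banner; that the script builds the same dense boolean mask is an assumption), the allocation shrinks to 2 GiB and fits comfortably within the 139.81 GiB reported per GPU, which is presumably why the sweep continues at this shorter context length.

# Sketch only: mask footprint for the configuration in the banner above,
# assuming the same dense [batch, 1, seq, seq] bool mask as before.
batch_size, seq_len = 32, 8192
mask_bytes = batch_size * 1 * seq_len * seq_len
print(f"dense mask: {mask_bytes / 2**30:.2f} GiB")  # -> dense mask: 2.00 GiB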