diff --git "a/attnserver.run_attnserver.slurm.sh.343262.err.log" "b/attnserver.run_attnserver.slurm.sh.343262.err.log" --- "a/attnserver.run_attnserver.slurm.sh.343262.err.log" +++ "b/attnserver.run_attnserver.slurm.sh.343262.err.log" @@ -215,3 +215,1960 @@ W0621 22:07:39.919000 3216603 site-packages/torch/distributed/run.py:766] ****** [rank0]: return io.open(self, mode, buffering, encoding, errors, newline) [rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ [rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/.metadata.tmp' +[rank0]:[W621 22:09:02.420333885 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +W0621 22:09:06.404000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216675 closing signal SIGTERM +W0621 22:09:06.407000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216676 closing signal SIGTERM +W0621 22:09:06.410000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216677 closing signal SIGTERM +W0621 22:09:06.412000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216678 closing signal SIGTERM +W0621 22:09:06.416000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216679 closing signal SIGTERM +W0621 22:09:06.445000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216680 closing signal SIGTERM +W0621 22:09:06.459000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3216681 closing signal SIGTERM +E0621 22:09:08.442000 3216603 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 3216674) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Traceback (most recent call last): + File "", line 198, in _run_module_as_main + File "", line 88, in _run_code + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in + main() + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper + return arg(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^ + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main + launch(args) + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch + run(args) + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run + elastic_launch( + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ + return launch_agent(self._config, self._entrypoint, list(args)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent + raise ChildFailedError( 
+torch.distributed.elastic.multiprocessing.errors.ChildFailedError: +============================================================ +./pretrain_gpt_profile.py FAILED +------------------------------------------------------------ +Failures: + +------------------------------------------------------------ +Root Cause (first observed failure): +[0]: + time : 2025-06-21_22:09:06 + host : fs-mbz-gpu-570 + rank : 0 (local_rank: 0) + exitcode : 1 (pid: 3216674) + error_file: + traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html +============================================================ ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=2048 ++ PROF_CTX_LENGTH=2048 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp1.cp8.bs2.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L2048*tp1.cp8.bs2.json' ']' ++ echo 'Running ctx_length=2048, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 2048 --max-position-embeddings 2048 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 22:09:11.830000 3220243 site-packages/torch/distributed/run.py:766] +W0621 22:09:11.830000 3220243 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 22:09:11.830000 3220243 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 22:09:11.830000 3220243 site-packages/torch/distributed/run.py:766] ***************************************** +[rank4]:[W621 22:09:32.184335382 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 22:09:32.184596897 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. 
This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 22:09:32.185157087 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 22:09:32.185208617 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 22:09:32.185278421 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 22:09:32.187776658 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 22:09:32.187931519 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 22:09:32.461693105 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +[rank0]:[W621 22:10:06.053994139 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 22:10:06.133471248 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 22:10:06.153802155 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 22:10:06.163871227 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 22:10:06.184087182 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 22:10:06.193973629 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 22:10:06.194001994 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 22:10:07.606376966 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=4096 ++ PROF_CTX_LENGTH=4096 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp1.cp8.bs2.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L4096*tp1.cp8.bs2.json' ']' ++ echo 'Running ctx_length=4096, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 4096 --max-position-embeddings 4096 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. 
See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 22:10:13.853000 3223814 site-packages/torch/distributed/run.py:766] +W0621 22:10:13.853000 3223814 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 22:10:13.853000 3223814 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 22:10:13.853000 3223814 site-packages/torch/distributed/run.py:766] ***************************************** +[rank6]:[W621 22:10:35.913118861 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 22:10:35.914097074 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 22:10:35.914234306 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 22:10:35.914597827 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 22:10:35.914665437 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 22:10:35.914847651 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 22:10:35.915579119 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 22:10:35.050004379 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +[rank0]:[W621 22:11:08.737162834 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 22:11:08.150151679 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 22:11:08.170267279 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 22:11:08.180311278 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 22:11:08.190398266 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 22:11:08.200389843 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 22:11:08.321071434 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 22:11:08.321157304 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=8192 ++ PROF_CTX_LENGTH=8192 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp1.cp8.bs2.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp1.cp8.bs2.json' ']' ++ echo 'Running ctx_length=8192, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 22:11:15.574000 3227319 site-packages/torch/distributed/run.py:766] +W0621 22:11:15.574000 3227319 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 22:11:15.574000 3227319 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 22:11:15.574000 3227319 site-packages/torch/distributed/run.py:766] ***************************************** +[rank2]:[W621 22:11:36.399760598 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 22:11:36.399769446 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 22:11:36.399777896 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
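The per-rank NCCL warnings above ("using GPU N as device used by this process is currently unknown ... specify device_id in init_process_group()") point at binding each rank to its GPU before any collective runs. A minimal sketch of that binding on a recent PyTorch, assuming the launcher exports LOCAL_RANK and that local ranks map one-to-one onto visible CUDA devices; this is illustrative, not code from pretrain_gpt_profile.py:

```python
# Minimal sketch: bind each rank to its GPU so NCCL knows the device up front.
# Assumes LOCAL_RANK is set by the launcher and maps 1:1 onto visible CUDA devices.
import os

import torch
import torch.distributed as dist

local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)

# device_id tells NCCL which GPU this rank uses, silencing the
# "device used by this process is currently unknown" warning.
dist.init_process_group(
    backend="nccl",
    device_id=torch.device(f"cuda:{local_rank}"),
)
```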
+[rank5]:[W621 22:11:36.399800856 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 22:11:36.400143484 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 22:11:36.405979904 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 22:11:36.406425341 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 22:11:36.585295515 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +[rank0]:[W621 22:12:14.178137339 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 22:12:14.295078291 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 22:12:14.304752864 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 22:12:14.314908186 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 22:12:14.315074949 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 22:12:14.325022764 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 22:12:14.325073369 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank3]:[W621 22:12:15.697696735 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=12288 ++ PROF_CTX_LENGTH=12288 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp1.cp8.bs2.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L12288*tp1.cp8.bs2.json' ']' ++ echo 'Running ctx_length=12288, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 12288 --max-position-embeddings 12288 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 22:12:22.212000 3230830 site-packages/torch/distributed/run.py:766] +W0621 22:12:22.212000 3230830 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 22:12:22.212000 3230830 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
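The FutureWarning printed before each run notes that torch.distributed.launch is deprecated in favour of torchrun, and that scripts should read the local rank from os.environ['LOCAL_RANK'] instead of a --local-rank argument. A minimal sketch of that pattern, keeping the legacy flag only as a fallback; the argument handling is an assumption, not taken from pretrain_gpt_profile.py:

```python
# Minimal sketch: prefer the LOCAL_RANK environment variable set by
# torchrun / torch.distributed.launch over the deprecated --local-rank flag.
import argparse
import os

parser = argparse.ArgumentParser()
# Kept only so a legacy launcher invocation does not crash; unused otherwise.
parser.add_argument("--local-rank", "--local_rank", type=int, default=0)
args, _ = parser.parse_known_args()

local_rank = int(os.environ.get("LOCAL_RANK", args.local_rank))
print(f"running as local rank {local_rank}")
```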
+W0621 22:12:22.212000 3230830 site-packages/torch/distributed/run.py:766] ***************************************** +[rank5]:[W621 22:12:43.972749454 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 22:12:43.972753054 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 22:12:43.972771557 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 22:12:43.972797165 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 22:12:43.972823234 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 22:12:43.972890576 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 22:12:43.972978202 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 22:12:43.110582755 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. + warnings.warn( +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect. 
+ warnings.warn( +[rank0]: Traceback (most recent call last): +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank0]: pretrain( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain +[rank0]: save_checkpoint( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint +[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy, +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save +[rank0]: sharded_strategy.save(sharded_state_dict, checkpoint_dir) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save +[rank0]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save +[rank0]: async_calls.maybe_finalize_async_calls(blocking=True) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls +[rank0]: finalize_fn() +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn +[rank0]: save_state_dict_async_finalize(*save_state_dict_ret) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 243, in save_state_dict_async_finalize +[rank0]: storage_writer.finish(global_metadata, all_results) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 483, in finish +[rank0]: super().finish(metadata, results) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 697, in finish +[rank0]: with self.fs.create_stream(tmp_path, "wb") as metadata_file: +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/contextlib.py", line 137, in __enter__ +[rank0]: return next(self.gen) +[rank0]: ^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/filesystem.py", line 476, in create_stream +[rank0]: with path.open(mode) as stream: +[rank0]: ^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/pathlib.py", line 1013, in open +[rank0]: return io.open(self, mode, buffering, encoding, errors, newline) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/.metadata.tmp' +[rank0]:[W621 22:13:52.053566006 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +W0621 22:14:01.442000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230902 closing signal SIGTERM +W0621 22:14:01.444000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230903 closing signal SIGTERM +W0621 22:14:01.448000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230904 closing signal SIGTERM +W0621 22:14:01.454000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230905 closing signal SIGTERM +W0621 22:14:01.473000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230906 closing signal SIGTERM +W0621 22:14:01.487000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230907 closing signal SIGTERM +W0621 22:14:01.491000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3230908 closing signal SIGTERM +E0621 22:14:05.726000 3230830 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 3230901) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Traceback (most recent call last): + File "", line 198, in _run_module_as_main + File "", line 88, in _run_code + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in + main() + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper + return arg(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^ + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main + launch(args) + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch + run(args) + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run + elastic_launch( + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ + return launch_agent(self._config, self._entrypoint, list(args)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent + raise ChildFailedError( +torch.distributed.elastic.multiprocessing.errors.ChildFailedError: +============================================================ +./pretrain_gpt_profile.py FAILED +------------------------------------------------------------ +Failures: + +------------------------------------------------------------ +Root Cause (first observed failure): +[0]: + time : 2025-06-21_22:14:01 + host : fs-mbz-gpu-570 + rank : 0 (local_rank: 0) + exitcode : 1 (pid: 3230901) + error_file: + traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html +============================================================ ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=16384 ++ 
PROF_CTX_LENGTH=16384 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp1.cp8.bs2.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L16384*tp1.cp8.bs2.json' ']' ++ echo 'Running ctx_length=16384, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 16384 --max-position-embeddings 16384 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 22:14:09.094000 3234079 site-packages/torch/distributed/run.py:766] +W0621 22:14:09.094000 3234079 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 22:14:09.094000 3234079 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 22:14:09.094000 3234079 site-packages/torch/distributed/run.py:766] ***************************************** +[rank4]:[W621 22:14:32.698753152 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank7]:[W621 22:14:32.698755888 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 22:14:32.698787897 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 22:14:32.698791466 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. 
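The FileNotFoundError in the 12288 run above fails while rank 0 writes 'gpt-checkpoint/iter_0000010/.metadata.tmp', i.e. the per-iteration checkpoint directory is missing (or was removed) by the time the torch.distributed.checkpoint writer finishes the save. A hedged sketch of a pre-save guard; ensure_checkpoint_dir() is a hypothetical helper, not Megatron-LM API:

```python
# Hedged sketch: create the per-iteration checkpoint directory before the
# distributed save writes its .metadata.tmp file into it.
# ensure_checkpoint_dir() is a hypothetical helper, not Megatron-LM API.
import os

import torch.distributed as dist


def ensure_checkpoint_dir(save_dir: str, iteration: int) -> str:
    ckpt_dir = os.path.join(save_dir, f"iter_{iteration:07d}")
    if not dist.is_initialized() or dist.get_rank() == 0:
        os.makedirs(ckpt_dir, exist_ok=True)
    if dist.is_initialized():
        dist.barrier()  # make sure every rank sees the directory before saving
    return ckpt_dir


# Illustrative usage: ensure_checkpoint_dir("gpt-checkpoint", 10) yields
# "gpt-checkpoint/iter_0000010", the directory the failing .metadata.tmp
# write above expected to exist.
```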
+[... the same ProcessGroupNCCL warning is also emitted by ranks 5, 2, 6, and 0 ...]
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+[... identical UserWarning emitted once per rank (8 occurrences in total) ...]
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+[... identical DeprecationWarning emitted once per rank (8 occurrences in total) ...]
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py:915: FutureWarning: `load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.
+ checkpoint.load_state_dict(
+[... identical FutureWarning emitted once per rank (8 occurrences in total) ...]
+[... the following FutureWarning from planner_helpers.py is likewise emitted once per rank; only the last occurrence is kept ...]
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/planner_helpers.py:406: FutureWarning: Please use DTensor instead and we are deprecating ShardedTensor.
+ device = getattr(value, "device", None) +[rank0]: Traceback (most recent call last): +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank0]: pretrain( +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank0]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank0]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank0]: ^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank0]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank0]: return _load_global_dist_base_checkpoint( +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank0]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank0]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank0]: checkpoint.load_state_dict( +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank0]: return arg(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank0]: return _load_state_dict( +[rank0]: ^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank0]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank0]: raise result +[rank0]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank0]: Traceback (most recent call last): (RANK 0) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 1) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 2) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 
605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 3) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 4) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 5) +[rank0]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 6) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank0]: Traceback (most recent call last): (RANK 7) +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank0]: local_data = map_fun() +[rank0]: ^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank0]: result = func(*args, **kwargs) +[rank0]: ^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step +[rank0]: local_plan = planner.create_local_plan() +[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank0]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank0]: raise CheckpointingException(_msg) +[rank0]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +[rank1]: Traceback (most recent call last): +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank1]: pretrain( +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank1]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank1]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank1]: ^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank1]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank1]: return _load_global_dist_base_checkpoint( +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank1]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank1]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank1]: checkpoint.load_state_dict( +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank1]: return arg(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank1]: return _load_state_dict( +[rank1]: ^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank1]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank1]: 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter
+[rank1]: raise result
+[rank1]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7])
+[rank1]: [... per-rank (RANK 0) through (RANK 5) tracebacks identical to those shown for rank0 above ...]
+[rank1]: Traceback (most recent call last): (RANK 6)
+[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter
+[rank1]: local_data = map_fun()
+[rank1]: ^^^^^^^^^
+[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper
+[rank1]: result = func(*args, **kwargs)
+[rank1]: ^^^^^^^^^^^^^^^^^^^^^
+[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step
+[rank1]: local_plan = planner.create_local_plan()
+[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan
+[rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors)
+[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes
+[rank1]: raise CheckpointingException(_msg)
+[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key
embedding.position_embeddings.weight +[rank1]: Traceback (most recent call last): (RANK 7) +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank1]: local_data = map_fun() +[rank1]: ^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank1]: result = func(*args, **kwargs) +[rank1]: ^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank1]: local_plan = planner.create_local_plan() +[rank1]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank1]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank1]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank1]: raise CheckpointingException(_msg) +[rank1]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +[rank3]: Traceback (most recent call last): +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank3]: pretrain( +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank3]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank3]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank3]: ^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank3]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank3]: return _load_global_dist_base_checkpoint( +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank3]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank3]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank3]: checkpoint.load_state_dict( +[rank3]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank3]: return arg(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank3]: return _load_state_dict( +[rank3]: ^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank3]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank3]: raise result +[rank3]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank3]: Traceback (most recent call last): (RANK 0) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 1) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 2) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 3) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 4) +[rank3]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 5) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 6) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank3]: Traceback (most recent call last): (RANK 7) +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank3]: local_data = map_fun() +[rank3]: ^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank3]: result = func(*args, **kwargs) +[rank3]: ^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank3]: local_plan = planner.create_local_plan() +[rank3]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank3]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank3]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank3]: raise CheckpointingException(_msg) +[rank3]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +[rank7]: Traceback (most recent call last): +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank7]: pretrain( +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank7]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank7]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank7]: ^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank7]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank7]: return _load_global_dist_base_checkpoint( +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank7]: state_dict = 
dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank7]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank7]: checkpoint.load_state_dict( +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank7]: return arg(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank7]: return _load_state_dict( +[rank7]: ^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank7]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank7]: raise result +[rank7]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank7]: Traceback (most recent call last): (RANK 0) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 1) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data 
= map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 2) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 3) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 4) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 5) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key 
embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 6) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank7]: Traceback (most recent call last): (RANK 7) +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank7]: local_data = map_fun() +[rank7]: ^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank7]: result = func(*args, **kwargs) +[rank7]: ^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank7]: local_plan = planner.create_local_plan() +[rank7]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank7]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank7]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank7]: raise CheckpointingException(_msg) +[rank7]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +[rank5]: Traceback (most recent call last): +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank5]: pretrain( +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank5]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank5]: args.iteration, 
args.num_floating_point_operations_so_far = load_checkpoint( +[rank5]: ^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank5]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank5]: return _load_global_dist_base_checkpoint( +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank5]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank5]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank5]: checkpoint.load_state_dict( +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank5]: return arg(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank5]: return _load_state_dict( +[rank5]: ^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank5]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank5]: raise result +[rank5]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank5]: Traceback (most recent call last): (RANK 0) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: 
self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 1) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 2) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 3) +[rank5]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 4) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 5) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in 
local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 6) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank5]: Traceback (most recent call last): (RANK 7) +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank5]: local_data = map_fun() +[rank5]: ^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank5]: result = func(*args, **kwargs) +[rank5]: ^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank5]: local_plan = planner.create_local_plan() +[rank5]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank5]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank5]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank5]: raise CheckpointingException(_msg) +[rank5]: megatron.core.dist_checkpointing.core.CheckpointingException: Global 
shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +[rank2]: Traceback (most recent call last): +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank2]: pretrain( +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank2]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank2]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank2]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank2]: return _load_global_dist_base_checkpoint( +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank2]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank2]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank2]: checkpoint.load_state_dict( +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank2]: return arg(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank2]: return _load_state_dict( +[rank2]: ^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank2]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank2]: raise result +[rank2]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank2]: Traceback (most recent call last): (RANK 0) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = 
map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 1) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 2) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File 
"/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 3) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.1[rank6]: Traceback (most recent call last): +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank6]: pretrain( +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank6]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank6]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank6]: ^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank6]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank6]: return _load_global_dist_base_checkpoint( +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank6]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank6]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank6]: checkpoint.load_state_dict( +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper +[rank6]: return arg(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank6]: return _load_state_dict( +[rank6]: ^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank6]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, 
global_step) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank6]: raise result +[rank6]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank6]: Traceback (most recent call last): (RANK 0) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 1) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 2) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in 
reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 3) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.1[rank4]: Traceback (most recent call last): +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in +[rank4]: pretrain( +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 805, in pretrain +[rank4]: model, optimizer, opt_param_scheduler = setup_model_and_optimizer( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 1283, in setup_model_and_optimizer +[rank4]: args.iteration, args.num_floating_point_operations_so_far = load_checkpoint( +[rank4]: ^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 1374, in load_checkpoint +[rank4]: state_dict, checkpoint_name, release, ckpt_type = _load_base_checkpoint( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 974, in _load_base_checkpoint +[rank4]: return _load_global_dist_base_checkpoint( +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 870, in _load_global_dist_base_checkpoint +[rank4]: state_dict = dist_checkpointing.load(sharded_state_dict, checkpoint_name, load_strategy, strict=args.dist_ckpt_strictness) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 148, in load +[rank4]: loaded_state_dict = sharded_strategy.load(sharded_state_dict, checkpoint_dir) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 915, in load +[rank4]: checkpoint.load_state_dict( +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 
3253, in wrapper +[rank4]: return arg(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 41, in load_state_dict +[rank4]: return _load_state_dict( +[rank4]: ^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 234, in _load_state_dict +[rank4]: central_plan: LoadPlan = distW.reduce_scatter("plan", local_step, global_step) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 219, in reduce_scatter +[rank4]: raise result +[rank4]: torch.distributed.checkpoint.api.CheckpointException: CheckpointException ranks:dict_keys([0, 1, 2, 3, 4, 5, 6, 7]) +[rank4]: Traceback (most recent call last): (RANK 0) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 4) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 5) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_che2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 4) +[rank6]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 5) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_che/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 1) +[rank4]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 2) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 3) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.1ckpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 6) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in 
reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank2]: Traceback (most recent call last): (RANK 7) +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank2]: local_data = map_fun() +[rank2]: ^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank2]: result = func(*args, **kwargs) +[rank2]: ^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank2]: local_plan = planner.create_local_plan() +[rank2]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank2]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank2]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank2]: raise CheckpointingException(_msg) +[rank2]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +ckpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 6) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: 
^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank6]: Traceback (most recent call last): (RANK 7) +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank6]: local_data = map_fun() +[rank6]: ^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank6]: result = func(*args, **kwargs) +[rank6]: ^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank6]: local_plan = planner.create_local_plan() +[rank6]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank6]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank6]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank6]: raise CheckpointingException(_msg) +[rank6]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +2/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: 
megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 4) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 5) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 6) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight +[rank4]: Traceback (most recent call last): (RANK 7) +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/utils.py", line 192, in reduce_scatter +[rank4]: local_data = map_fun() +[rank4]: ^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/logger.py", line 87, in wrapper +[rank4]: result = func(*args, **kwargs) +[rank4]: ^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/checkpoint/state_dict_loader.py", line 223, in local_step +[rank4]: local_plan = planner.create_local_plan() +[rank4]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 605, in create_local_plan +[rank4]: self._validate_global_shapes(self.metadata, self.shapes_validation_sharded_tensors) +[rank4]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 576, in _validate_global_shapes +[rank4]: raise CheckpointingException(_msg) +[rank4]: megatron.core.dist_checkpointing.core.CheckpointingException: Global shape mismatch for loaded (torch.Size([2048, 4096])) and expected ((16384, 4096)) tensor for key embedding.position_embeddings.weight + +[rank3]:[W621 22:14:46.874909927 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank7]:[W621 22:14:46.005730845 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank4]:[W621 22:14:46.076405889 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. 
For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank1]:[W621 22:14:46.096739180 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank6]:[W621 22:14:46.157074220 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank0]:[W621 22:14:46.206976838 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank5]:[W621 22:14:46.217487880 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +[rank2]:[W621 22:14:46.217493045 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator()) +W0621 22:14:47.389000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234150 closing signal SIGTERM +W0621 22:14:47.390000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234151 closing signal SIGTERM +W0621 22:14:47.390000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234152 closing signal SIGTERM +W0621 22:14:47.390000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234153 closing signal SIGTERM +W0621 22:14:47.391000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234154 closing signal SIGTERM +W0621 22:14:47.391000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234155 closing signal SIGTERM +W0621 22:14:47.392000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3234156 closing signal SIGTERM +E0621 22:14:48.034000 3234079 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 7 (pid: 3234157) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +Traceback (most recent call last): + File "", line 198, in _run_module_as_main + File "", line 88, in _run_code + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in + main() + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper + return arg(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^ + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main + launch(args) + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch + run(args) + File 
"/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run + elastic_launch( + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ + return launch_agent(self._config, self._entrypoint, list(args)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent + raise ChildFailedError( +torch.distributed.elastic.multiprocessing.errors.ChildFailedError: +============================================================ +./pretrain_gpt_profile.py FAILED +------------------------------------------------------------ +Failures: + +------------------------------------------------------------ +Root Cause (first observed failure): +[0]: + time : 2025-06-21_22:14:47 + host : fs-mbz-gpu-570 + rank : 7 (local_rank: 7) + exitcode : 1 (pid: 3234157) + error_file: + traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html +============================================================ ++ set +x ++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072 ++ export PROF_CTX_LENGTH=24576 ++ PROF_CTX_LENGTH=24576 ++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L24576*tp1.cp8.bs2.json' ++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L24576*tp1.cp8.bs2.json' ']' ++ echo 'Running ctx_length=24576, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2' ++ srun bash ./attnserver.sh ++ which python3 ++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 24576 --max-position-embeddings 24576 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/ +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated +and will be removed in future. Use torchrun. +Note that --use-env is set by default in torchrun. +If your script expects `--local-rank` argument to be set, please +change it to read from `os.environ['LOCAL_RANK']` instead. 
See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + main() +W0621 22:14:50.999000 3235942 site-packages/torch/distributed/run.py:766] +W0621 22:14:50.999000 3235942 site-packages/torch/distributed/run.py:766] ***************************************** +W0621 22:14:50.999000 3235942 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. +W0621 22:14:50.999000 3235942 site-packages/torch/distributed/run.py:766] ***************************************** +[rank7]:[W621 22:15:13.959945527 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank6]:[W621 22:15:13.962492268 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank2]:[W621 22:15:13.962530690 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank3]:[W621 22:15:13.962580100 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank5]:[W621 22:15:13.962580097 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank4]:[W621 22:15:13.962692842 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank1]:[W621 22:15:13.962758128 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +[rank0]:[W621 22:15:13.103434945 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device. +/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly. 
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+[rank0]:[W621 22:16:29.420197805 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank1]:[W621 22:16:30.695695384 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank3]:[W621 22:16:30.712051338 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank4]:[W621 22:16:30.712174606 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank5]:[W621 22:16:30.722197487 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank6]:[W621 22:16:30.722495525 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank2]:[W621 22:16:30.078182261 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+[rank7]:[W621 22:16:30.158718676 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
++ set +x
++ for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
++ export PROF_CTX_LENGTH=32768
++ PROF_CTX_LENGTH=32768
++ name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp1.cp8.bs2.json'
++ '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L32768*tp1.cp8.bs2.json' ']'
++ echo 'Running ctx_length=32768, TP_SIZE=1, CP_SIZE=8, BATCH_SIZE=2'
++ srun bash ./attnserver.sh
++ which python3
++ python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 1 --node_rank 0 --rdzv_id 343262 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-570:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 1 --context-parallel-size 8 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 32768 --max-position-embeddings 32768 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead. See
+https://pytorch.org/docs/stable/distributed.html#launch-utility for
+further instructions
+
+ main()
+W0621 22:16:48.407000 3239634 site-packages/torch/distributed/run.py:766]
+W0621 22:16:48.407000 3239634 site-packages/torch/distributed/run.py:766] *****************************************
+W0621 22:16:48.407000 3239634 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+W0621 22:16:48.407000 3239634 site-packages/torch/distributed/run.py:766] *****************************************
+[rank6]:[W621 22:17:10.039751118 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank2]:[W621 22:17:10.039779342 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank4]:[W621 22:17:10.039797144 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank1]:[W621 22:17:10.039823404 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank3]:[W621 22:17:10.039858634 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank7]:[W621 22:17:10.040080121 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank5]:[W621 22:17:10.041021608 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+[rank0]:[W621 22:17:10.179828934 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
+ warnings.warn(
+[rank0]: FileNotFoundError: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/__0_0.distcp'
+
+[rank0]: The above exception was the direct cause of the following exception:
+
+[rank0]: Traceback (most recent call last):
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 554, in
+[rank0]: pretrain(
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/training.py", line 879, in pretrain
+[rank0]: save_checkpoint(
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/training/checkpointing.py", line 469, in save_checkpoint
+[rank0]: async_save_request = dist_checkpointing.save(state_dict, checkpoint_name, save_strategy,
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/serialization.py", line 404, in save
+[rank0]: sharded_strategy.save(sharded_state_dict, checkpoint_dir)
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/fully_parallel.py", line 95, in save
+[rank0]: return self.base_strategy.save(sharded_state_dict, checkpoint_dir)
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/base.py", line 228, in save
+[rank0]: async_calls.maybe_finalize_async_calls(blocking=True)
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/async_utils.py", line 545, in maybe_finalize_async_calls
+[rank0]: finalize_fn()
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/torch.py", line 800, in finalize_fn
+[rank0]: save_state_dict_async_finalize(*save_state_dict_ret)
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/state_dict_saver.py", line 229, in save_state_dict_async_finalize
+[rank0]: write_results = storage_writer.retrieve_write_results()
+[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+[rank0]: File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/dist_checkpointing/strategies/filesystem_async.py", line 436, in retrieve_write_results
+[rank0]: raise RuntimeError(f'Worker failure: {write_results_or_exc}') from write_results_or_exc
+[rank0]: RuntimeError: Worker failure: [Errno 2] No such file or directory: 'gpt-checkpoint/iter_0000010/__0_0.distcp'